Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpica/exfldio.c14
-rw-r--r--drivers/acpi/acpica/nsinit.c1
-rw-r--r--drivers/acpi/acpica/utosi.c3
-rw-r--r--drivers/bcma/driver_mips.c2
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/nvme-core.c (renamed from drivers/block/nvme.c)594
-rw-r--r--drivers/block/nvme-scsi.c3053
-rw-r--r--drivers/dma/Kconfig16
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/acpi-dma.c279
-rw-r--r--drivers/dma/at_hdmac.c97
-rw-r--r--drivers/dma/at_hdmac_regs.h4
-rw-r--r--drivers/dma/coh901318.c4
-rw-r--r--drivers/dma/dmaengine.c17
-rw-r--r--drivers/dma/dmatest.c887
-rw-r--r--drivers/dma/dw_dmac.c203
-rw-r--r--drivers/dma/dw_dmac_regs.h6
-rw-r--r--drivers/dma/imx-dma.c7
-rw-r--r--drivers/dma/imx-sdma.c4
-rw-r--r--drivers/dma/ioat/dma.c8
-rw-r--r--drivers/dma/ioat/dma.h53
-rw-r--r--drivers/dma/ioat/dma_v2.h2
-rw-r--r--drivers/dma/ioat/dma_v3.c912
-rw-r--r--drivers/dma/ioat/hw.h88
-rw-r--r--drivers/dma/ioat/pci.c20
-rw-r--r--drivers/dma/ioat/registers.h4
-rw-r--r--drivers/dma/ipu/ipu_idmac.c6
-rw-r--r--drivers/dma/of-dma.c96
-rw-r--r--drivers/dma/omap-dma.c38
-rw-r--r--drivers/dma/pch_dma.c2
-rw-r--r--drivers/dma/pl330.c10
-rw-r--r--drivers/dma/sh/Kconfig24
-rw-r--r--drivers/dma/sh/Makefile3
-rw-r--r--drivers/dma/sh/sudmac.c428
-rw-r--r--drivers/dma/sirf-dma.c24
-rw-r--r--drivers/dma/tegra20-apb-dma.c87
-rw-r--r--drivers/dma/timb_dma.c2
-rw-r--r--drivers/dma/txx9dmac.c8
-rw-r--r--drivers/edac/edac_mc_sysfs.c18
-rw-r--r--drivers/extcon/Kconfig2
-rw-r--r--drivers/firewire/core-cdev.c27
-rw-r--r--drivers/firewire/core-device.c4
-rw-r--r--drivers/firewire/net.c7
-rw-r--r--drivers/firewire/ohci.c270
-rw-r--r--drivers/firewire/sbp2.c10
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-lpc32xx.c2
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-debug.c15
-rw-r--r--drivers/hid/hid-steelseries.c9
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/i2c/muxes/Kconfig4
-rw-r--r--drivers/input/keyboard/Kconfig6
-rw-r--r--drivers/input/misc/Kconfig8
-rw-r--r--drivers/input/mouse/Kconfig2
-rw-r--r--drivers/leds/Kconfig6
-rw-r--r--drivers/md/dm-bufio.c24
-rw-r--r--drivers/md/dm-cache-metadata.c4
-rw-r--r--drivers/md/dm-cache-policy.h4
-rw-r--r--drivers/md/dm-cache-target.c100
-rw-r--r--drivers/md/dm-mpath.c1
-rw-r--r--drivers/md/dm-snap.c1
-rw-r--r--drivers/md/dm-stripe.c11
-rw-r--r--drivers/md/dm-table.c2
-rw-r--r--drivers/md/dm-thin-metadata.c36
-rw-r--r--drivers/md/dm-thin-metadata.h7
-rw-r--r--drivers/md/dm-thin.c200
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c3
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c127
-rw-r--r--drivers/md/persistent-data/dm-space-map.h23
-rw-r--r--drivers/mtd/Kconfig13
-rw-r--r--drivers/mtd/Makefile3
-rw-r--r--drivers/mtd/chips/Kconfig1
-rw-r--r--drivers/mtd/devices/Kconfig64
-rw-r--r--drivers/mtd/devices/Makefile5
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c13
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.h59
-rw-r--r--drivers/mtd/devices/doc2000.c1178
-rw-r--r--drivers/mtd/devices/doc2001.c824
-rw-r--r--drivers/mtd/devices/doc2001plus.c1080
-rw-r--r--drivers/mtd/devices/docecc.c521
-rw-r--r--drivers/mtd/devices/docg3.c15
-rw-r--r--drivers/mtd/devices/docprobe.c325
-rw-r--r--drivers/mtd/devices/elm.c9
-rw-r--r--drivers/mtd/devices/m25p80.c25
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c4
-rw-r--r--drivers/mtd/maps/Kconfig79
-rw-r--r--drivers/mtd/maps/Makefile8
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c3
-rw-r--r--drivers/mtd/maps/ck804xrom.c3
-rw-r--r--drivers/mtd/maps/dbox2-flash.c123
-rw-r--r--drivers/mtd/maps/dc21285.c3
-rw-r--r--drivers/mtd/maps/dilnetpc.c496
-rw-r--r--drivers/mtd/maps/dmv182.c146
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c3
-rw-r--r--drivers/mtd/maps/h720x-flash.c120
-rw-r--r--drivers/mtd/maps/impa7.c7
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c4
-rw-r--r--drivers/mtd/maps/ixp2000.c253
-rw-r--r--drivers/mtd/maps/ixp4xx.c2
-rw-r--r--drivers/mtd/maps/lantiq-flash.c3
-rw-r--r--drivers/mtd/maps/mbx860.c98
-rw-r--r--drivers/mtd/maps/pci.c3
-rw-r--r--drivers/mtd/maps/physmap.c17
-rw-r--r--drivers/mtd/maps/physmap_of.c16
-rw-r--r--drivers/mtd/maps/plat-ram.c2
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c4
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c5
-rw-r--r--drivers/mtd/maps/rpxlite.c64
-rw-r--r--drivers/mtd/maps/sa1100-flash.c2
-rw-r--r--drivers/mtd/maps/solutionengine.c2
-rw-r--r--drivers/mtd/maps/tqm8xxl.c249
-rw-r--r--drivers/mtd/maps/tsunami_flash.c5
-rw-r--r--drivers/mtd/mtdchar.c52
-rw-r--r--drivers/mtd/mtdcore.c26
-rw-r--r--drivers/mtd/mtdcore.h30
-rw-r--r--drivers/mtd/mtdpart.c4
-rw-r--r--drivers/mtd/nand/Kconfig32
-rw-r--r--drivers/mtd/nand/Makefile3
-rw-r--r--drivers/mtd/nand/atmel_nand.c15
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c16
-rw-r--r--drivers/mtd/nand/cafe_nand.c10
-rw-r--r--drivers/mtd/nand/davinci_nand.c16
-rw-r--r--drivers/mtd/nand/denali_dt.c18
-rw-r--r--drivers/mtd/nand/docg4.c13
-rw-r--r--drivers/mtd/nand/fsmc_nand.c13
-rw-r--r--drivers/mtd/nand/gpio.c8
-rw-r--r--drivers/mtd/nand/h1910.c167
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c4
-rw-r--r--drivers/mtd/nand/nand_base.c233
-rw-r--r--drivers/mtd/nand/nand_bbt.c25
-rw-r--r--drivers/mtd/nand/nand_ids.c242
-rw-r--r--drivers/mtd/nand/nandsim.c24
-rw-r--r--drivers/mtd/nand/nuc900_nand.c9
-rw-r--r--drivers/mtd/nand/omap2.c9
-rw-r--r--drivers/mtd/nand/orion_nand.c13
-rw-r--r--drivers/mtd/nand/ppchameleonevb.c403
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c2
-rw-r--r--drivers/mtd/nand/rtc_from4.c624
-rw-r--r--drivers/mtd/nand/sh_flctl.c16
-rw-r--r--drivers/mtd/nand/sm_common.c62
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c13
-rw-r--r--drivers/mtd/ofpart.c7
-rw-r--r--drivers/mtd/onenand/Kconfig7
-rw-r--r--drivers/mtd/onenand/Makefile3
-rw-r--r--drivers/mtd/onenand/omap2.c14
-rw-r--r--drivers/mtd/onenand/onenand_sim.c564
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c5
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c30
-rw-r--r--drivers/net/ethernet/freescale/fec.h10
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c44
-rw-r--r--drivers/net/ethernet/sfc/ptp.c4
-rw-r--r--drivers/net/ethernet/tile/tilegx.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c3
-rw-r--r--drivers/net/irda/bfin_sir.c6
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/sierra_net.c38
-rw-r--r--drivers/net/usb/usbnet.c77
-rw-r--r--drivers/of/of_mdio.c11
-rw-r--r--drivers/pci/bus.c1
-rw-r--r--drivers/pci/msi.c6
-rw-r--r--drivers/pci/probe.c1
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c140
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig26
-rw-r--r--drivers/regulator/Kconfig2
-rw-r--r--drivers/rtc/rtc-tile.c1
-rw-r--r--drivers/spi/Kconfig8
-rw-r--r--drivers/ssb/driver_mipscore.c2
-rw-r--r--drivers/staging/android/Kconfig2
-rw-r--r--drivers/staging/iio/accel/Kconfig2
-rw-r--r--drivers/staging/iio/adc/Kconfig2
-rw-r--r--drivers/staging/iio/addac/Kconfig2
-rw-r--r--drivers/staging/iio/resolver/Kconfig4
-rw-r--r--drivers/staging/iio/trigger/Kconfig2
-rw-r--r--drivers/thermal/Kconfig28
-rw-r--r--drivers/thermal/Makefile10
-rw-r--r--drivers/thermal/armada_thermal.c232
-rw-r--r--drivers/thermal/cpu_cooling.c295
-rw-r--r--drivers/thermal/db8500_cpufreq_cooling.c2
-rw-r--r--drivers/thermal/db8500_thermal.c19
-rw-r--r--drivers/thermal/dove_thermal.c7
-rw-r--r--drivers/thermal/exynos_thermal.c196
-rw-r--r--drivers/thermal/fair_share.c15
-rw-r--r--drivers/thermal/kirkwood_thermal.c12
-rw-r--r--drivers/thermal/rcar_thermal.c34
-rw-r--r--drivers/thermal/step_wise.c26
-rw-r--r--drivers/thermal/thermal_core.c (renamed from drivers/thermal/thermal_sys.c)197
-rw-r--r--drivers/thermal/thermal_core.h27
-rw-r--r--drivers/thermal/user_space.c15
-rw-r--r--drivers/tty/serial/68328serial.c1
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c1
-rw-r--r--drivers/usb/host/ehci-tilegx.c7
-rw-r--r--drivers/usb/host/ohci-tilegx.c7
-rw-r--r--drivers/usb/phy/Kconfig2
-rw-r--r--drivers/video/Kconfig4
-rw-r--r--drivers/video/au1100fb.c22
-rw-r--r--drivers/video/backlight/Kconfig4
-rw-r--r--drivers/video/mxsfb.c8
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/watchdog/ath79_wdt.c8
-rw-r--r--drivers/watchdog/davinci_wdt.c9
-rw-r--r--drivers/watchdog/s3c2410_wdt.c7
-rw-r--r--drivers/watchdog/shwdt.c7
-rw-r--r--drivers/watchdog/watchdog_dev.c3
207 files changed, 8529 insertions, 9635 deletions
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index ec7f569..c84ee95 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -720,7 +720,19 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
if ((obj_desc->common_field.start_field_bit_offset == 0) &&
(obj_desc->common_field.bit_length == access_bit_width)) {
- status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ);
+ if (buffer_length >= sizeof(u64)) {
+ status =
+ acpi_ex_field_datum_io(obj_desc, 0, buffer,
+ ACPI_READ);
+ } else {
+ /* Use raw_datum (u64) to handle buffers < 64 bits */
+
+ status =
+ acpi_ex_field_datum_io(obj_desc, 0, &raw_datum,
+ ACPI_READ);
+ ACPI_MEMCPY(buffer, &raw_datum, buffer_length);
+ }
+
return_ACPI_STATUS(status);
}
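The exfldio.c hunk above exists because the caller's buffer can be shorter than one 64-bit datum: rather than letting acpi_ex_field_datum_io() write a full u64 into it, the fix reads into a local raw_datum and copies only buffer_length bytes out. Below is a minimal userspace C sketch of that bounce-buffer pattern; read_datum(), extract_field() and the sample value are illustrative stand-ins, not ACPICA interfaces.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for acpi_ex_field_datum_io(): always yields a full 64-bit datum. */
static int read_datum(uint64_t *out)
{
	*out = 0x1122334455667788ULL;	/* pretend hardware value */
	return 0;
}

/* The pattern the hunk applies: write straight into the caller's buffer only
 * when it can hold a whole u64; otherwise bounce through a local u64 and copy
 * just buffer_length bytes, so a short buffer is never overrun. */
static int extract_field(void *buffer, size_t buffer_length)
{
	uint64_t raw_datum;
	int status;

	if (buffer_length >= sizeof(uint64_t))
		return read_datum(buffer);

	status = read_datum(&raw_datum);
	if (status)
		return status;
	memcpy(buffer, &raw_datum, buffer_length);
	return 0;
}

int main(void)
{
	uint8_t small[2];	/* e.g. a 16-bit field object */

	extract_field(small, sizeof(small));
	printf("%02x %02x\n", small[0], small[1]);	/* only 2 bytes written */
	return 0;
}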
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 2a431ec..46f0f83 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -558,6 +558,7 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
+ ACPI_MEMSET(info, 0, sizeof(struct acpi_evaluate_info));
info->prefix_node = device_node;
info->pathname = METHOD_NAME__INI;
info->parameters = NULL;
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index b15aceb..7e80772 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -349,7 +349,8 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
return_value = 0;
status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
if (ACPI_FAILURE(status)) {
- return (status);
+ acpi_ut_remove_reference(return_desc);
+ return_ACPI_STATUS(status);
}
/* Lookup the interface in the global _OSI list */
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 9a7f0e3..11115bb 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -21,7 +21,7 @@
#include <linux/serial_reg.h>
#include <linux/time.h>
-static const char *part_probes[] = { "bcm47xxpart", NULL };
+static const char * const part_probes[] = { "bcm47xxpart", NULL };
static struct physmap_flash_data bcma_pflash_data = {
.part_probe_types = part_probes,
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index a3b4023..ca07399 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -42,4 +42,5 @@ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
+nvme-y := nvme-core.o nvme-scsi.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/nvme.c b/drivers/block/nvme-core.c
index 9dcefe4..8efdfaa 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme-core.c
@@ -39,14 +39,13 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
-
+#include <scsi/sg.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
-#define NVME_IO_TIMEOUT (5 * HZ)
#define ADMIN_TIMEOUT (60 * HZ)
static int nvme_major;
@@ -60,43 +59,6 @@ static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
/*
- * Represents an NVM Express device. Each nvme_dev is a PCI function.
- */
-struct nvme_dev {
- struct list_head node;
- struct nvme_queue **queues;
- u32 __iomem *dbs;
- struct pci_dev *pci_dev;
- struct dma_pool *prp_page_pool;
- struct dma_pool *prp_small_pool;
- int instance;
- int queue_count;
- int db_stride;
- u32 ctrl_config;
- struct msix_entry *entry;
- struct nvme_bar __iomem *bar;
- struct list_head namespaces;
- char serial[20];
- char model[40];
- char firmware_rev[8];
- u32 max_hw_sectors;
-};
-
-/*
- * An NVM Express namespace is equivalent to a SCSI LUN
- */
-struct nvme_ns {
- struct list_head list;
-
- struct nvme_dev *dev;
- struct request_queue *queue;
- struct gendisk *disk;
-
- int ns_id;
- int lba_shift;
-};
-
-/*
* An NVM Express queue. Each device has at least two (one for admin
* commands and one for I/O commands).
*/
@@ -131,6 +93,7 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
@@ -261,12 +224,12 @@ static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
return ctx;
}
-static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
{
return dev->queues[get_cpu() + 1];
}
-static void put_nvmeq(struct nvme_queue *nvmeq)
+void put_nvmeq(struct nvme_queue *nvmeq)
{
put_cpu();
}
@@ -294,22 +257,6 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
return 0;
}
-/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries. You can't see it in this data structure because C doesn't let
- * me express that. Use nvme_alloc_iod to ensure there's enough space
- * allocated to store the PRP list.
- */
-struct nvme_iod {
- void *private; /* For the use of the submitter of the I/O */
- int npages; /* In the PRP list. 0 means small pool in use */
- int offset; /* Of PRP list */
- int nents; /* Used in scatterlist */
- int length; /* Of data, in bytes */
- dma_addr_t first_dma;
- struct scatterlist sg[0];
-};
-
static __le64 **iod_list(struct nvme_iod *iod)
{
return ((void *)iod) + iod->offset;
@@ -343,7 +290,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
return iod;
}
-static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
const int last_prp = PAGE_SIZE / 8 - 1;
int i;
@@ -361,16 +308,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
kfree(iod);
}
-static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
-{
- struct nvme_queue *nvmeq = get_nvmeq(dev);
- if (bio_list_empty(&nvmeq->sq_cong))
- add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- bio_list_add(&nvmeq->sq_cong, bio);
- put_nvmeq(nvmeq);
- wake_up_process(nvme_thread);
-}
-
static void bio_completion(struct nvme_dev *dev, void *ctx,
struct nvme_completion *cqe)
{
@@ -382,19 +319,15 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
nvme_free_iod(dev, iod);
- if (status) {
+ if (status)
bio_endio(bio, -EIO);
- } else if (bio->bi_vcnt > bio->bi_idx) {
- requeue_bio(dev, bio);
- } else {
+ else
bio_endio(bio, 0);
- }
}
/* length is in bytes. gfp flags indicates whether we may sleep. */
-static int nvme_setup_prps(struct nvme_dev *dev,
- struct nvme_common_command *cmd, struct nvme_iod *iod,
- int total_len, gfp_t gfp)
+int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
+ struct nvme_iod *iod, int total_len, gfp_t gfp)
{
struct dma_pool *pool;
int length = total_len;
@@ -473,43 +406,193 @@ static int nvme_setup_prps(struct nvme_dev *dev,
return total_len;
}
+struct nvme_bio_pair {
+ struct bio b1, b2, *parent;
+ struct bio_vec *bv1, *bv2;
+ int err;
+ atomic_t cnt;
+};
+
+static void nvme_bio_pair_endio(struct bio *bio, int err)
+{
+ struct nvme_bio_pair *bp = bio->bi_private;
+
+ if (err)
+ bp->err = err;
+
+ if (atomic_dec_and_test(&bp->cnt)) {
+ bio_endio(bp->parent, bp->err);
+ if (bp->bv1)
+ kfree(bp->bv1);
+ if (bp->bv2)
+ kfree(bp->bv2);
+ kfree(bp);
+ }
+}
+
+static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
+ int len, int offset)
+{
+ struct nvme_bio_pair *bp;
+
+ BUG_ON(len > bio->bi_size);
+ BUG_ON(idx > bio->bi_vcnt);
+
+ bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
+ if (!bp)
+ return NULL;
+ bp->err = 0;
+
+ bp->b1 = *bio;
+ bp->b2 = *bio;
+
+ bp->b1.bi_size = len;
+ bp->b2.bi_size -= len;
+ bp->b1.bi_vcnt = idx;
+ bp->b2.bi_idx = idx;
+ bp->b2.bi_sector += len >> 9;
+
+ if (offset) {
+ bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
+ GFP_ATOMIC);
+ if (!bp->bv1)
+ goto split_fail_1;
+
+ bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
+ GFP_ATOMIC);
+ if (!bp->bv2)
+ goto split_fail_2;
+
+ memcpy(bp->bv1, bio->bi_io_vec,
+ bio->bi_max_vecs * sizeof(struct bio_vec));
+ memcpy(bp->bv2, bio->bi_io_vec,
+ bio->bi_max_vecs * sizeof(struct bio_vec));
+
+ bp->b1.bi_io_vec = bp->bv1;
+ bp->b2.bi_io_vec = bp->bv2;
+ bp->b2.bi_io_vec[idx].bv_offset += offset;
+ bp->b2.bi_io_vec[idx].bv_len -= offset;
+ bp->b1.bi_io_vec[idx].bv_len = offset;
+ bp->b1.bi_vcnt++;
+ } else
+ bp->bv1 = bp->bv2 = NULL;
+
+ bp->b1.bi_private = bp;
+ bp->b2.bi_private = bp;
+
+ bp->b1.bi_end_io = nvme_bio_pair_endio;
+ bp->b2.bi_end_io = nvme_bio_pair_endio;
+
+ bp->parent = bio;
+ atomic_set(&bp->cnt, 2);
+
+ return bp;
+
+ split_fail_2:
+ kfree(bp->bv1);
+ split_fail_1:
+ kfree(bp);
+ return NULL;
+}
+
+static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
+ int idx, int len, int offset)
+{
+ struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
+ if (!bp)
+ return -ENOMEM;
+
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, &bp->b1);
+ bio_list_add(&nvmeq->sq_cong, &bp->b2);
+
+ return 0;
+}
+
/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
-static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
+static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
struct bio_vec *bvec, *bvprv = NULL;
struct scatterlist *sg = NULL;
- int i, old_idx, length = 0, nsegs = 0;
+ int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+
+ if (nvmeq->dev->stripe_size)
+ split_len = nvmeq->dev->stripe_size -
+ ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
sg_init_table(iod->sg, psegs);
- old_idx = bio->bi_idx;
bio_for_each_segment(bvec, bio, i) {
if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
sg->length += bvec->bv_len;
} else {
if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
- break;
+ return nvme_split_and_submit(bio, nvmeq, i,
+ length, 0);
+
sg = sg ? sg + 1 : iod->sg;
sg_set_page(sg, bvec->bv_page, bvec->bv_len,
bvec->bv_offset);
nsegs++;
}
+
+ if (split_len - length < bvec->bv_len)
+ return nvme_split_and_submit(bio, nvmeq, i, split_len,
+ split_len - length);
length += bvec->bv_len;
bvprv = bvec;
}
- bio->bi_idx = i;
iod->nents = nsegs;
sg_mark_end(sg);
- if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
- bio->bi_idx = old_idx;
+ if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
return -ENOMEM;
- }
+
+ BUG_ON(length != bio->bi_size);
return length;
}
+/*
+ * We reuse the small pool to allocate the 16-byte range here as it is not
+ * worth having a special pool for these or additional cases to handle freeing
+ * the iod.
+ */
+static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ struct bio *bio, struct nvme_iod *iod, int cmdid)
+{
+ struct nvme_dsm_range *range;
+ struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+ range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
+ &iod->first_dma);
+ if (!range)
+ return -ENOMEM;
+
+ iod_list(iod)[0] = (__le64 *)range;
+ iod->npages = 0;
+
+ range->cattr = cpu_to_le32(0);
+ range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
+ range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->dsm.opcode = nvme_cmd_dsm;
+ cmnd->dsm.command_id = cmdid;
+ cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+ cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
+ cmnd->dsm.nr = 0;
+ cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+
+ return 0;
+}
+
static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
int cmdid)
{
@@ -527,7 +610,7 @@ static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
return 0;
}
-static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
special_completion, NVME_IO_TIMEOUT);
@@ -567,6 +650,12 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
if (unlikely(cmdid < 0))
goto free_iod;
+ if (bio->bi_rw & REQ_DISCARD) {
+ result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
+ if (result)
+ goto free_cmdid;
+ return result;
+ }
if ((bio->bi_rw & REQ_FLUSH) && !psegs)
return nvme_submit_flush(nvmeq, ns, cmdid);
@@ -591,8 +680,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
dma_dir = DMA_FROM_DEVICE;
}
- result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
- if (result < 0)
+ result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
+ if (result <= 0)
goto free_cmdid;
length = result;
@@ -600,13 +689,11 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
GFP_ATOMIC);
- cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+ cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
- bio->bi_sector += length >> 9;
-
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
writel(nvmeq->sq_tail, nvmeq->q_db);
@@ -724,8 +811,8 @@ static void sync_completion(struct nvme_dev *dev, void *ctx,
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
*/
-static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
- struct nvme_command *cmd, u32 *result, unsigned timeout)
+int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+ u32 *result, unsigned timeout)
{
int cmdid;
struct sync_cmd_info cmdinfo;
@@ -741,7 +828,7 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
set_current_state(TASK_KILLABLE);
nvme_submit_cmd(nvmeq, cmd);
- schedule();
+ schedule_timeout(timeout);
if (cmdinfo.status == -EINTR) {
nvme_abort_command(nvmeq, cmdid);
@@ -754,7 +841,7 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
return cmdinfo.status;
}
-static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
u32 *result)
{
return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
@@ -827,7 +914,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
-static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
dma_addr_t dma_addr)
{
struct nvme_command c;
@@ -841,7 +928,7 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
return nvme_submit_admin_cmd(dev, &c, NULL);
}
-static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
dma_addr_t dma_addr, u32 *result)
{
struct nvme_command c;
@@ -855,8 +942,8 @@ static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
return nvme_submit_admin_cmd(dev, &c, result);
}
-static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
- unsigned dword11, dma_addr_t dma_addr, u32 *result)
+int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
+ dma_addr_t dma_addr, u32 *result)
{
struct nvme_command c;
@@ -885,7 +972,7 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
void *ctx;
nvme_completion_fn fn;
static struct nvme_completion cqe = {
- .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+ .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
};
if (timeout && !time_after(now, info[cmdid].timeout))
@@ -966,7 +1053,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
return nvmeq;
free_cqdma:
- dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
+ dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
nvmeq->cq_dma_addr);
free_nvmeq:
kfree(nvmeq);
@@ -1021,15 +1108,60 @@ static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
return ERR_PTR(result);
}
+static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
+{
+ unsigned long timeout;
+ u32 bit = enabled ? NVME_CSTS_RDY : 0;
+
+ timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+
+ while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&dev->pci_dev->dev,
+ "Device not ready; aborting initialisation\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * If the device has been passed off to us in an enabled state, just clear
+ * the enabled bit. The spec says we should set the 'shutdown notification
+ * bits', but doing so may cause the device to complete commands to the
+ * admin queue ... and we don't know what memory that might be pointing at!
+ */
+static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
+{
+ u32 cc = readl(&dev->bar->cc);
+
+ if (cc & NVME_CC_ENABLE)
+ writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc);
+ return nvme_wait_ready(dev, cap, false);
+}
+
+static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
+{
+ return nvme_wait_ready(dev, cap, true);
+}
+
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
- int result = 0;
+ int result;
u32 aqa;
- u64 cap;
- unsigned long timeout;
+ u64 cap = readq(&dev->bar->cap);
struct nvme_queue *nvmeq;
dev->dbs = ((void __iomem *)dev->bar) + 4096;
+ dev->db_stride = NVME_CAP_STRIDE(cap);
+
+ result = nvme_disable_ctrl(dev, cap);
+ if (result < 0)
+ return result;
nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
if (!nvmeq)
@@ -1043,38 +1175,28 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
- writel(0, &dev->bar->cc);
writel(aqa, &dev->bar->aqa);
writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
writel(dev->ctrl_config, &dev->bar->cc);
- cap = readq(&dev->bar->cap);
- timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
- dev->db_stride = NVME_CAP_STRIDE(cap);
-
- while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
- msleep(100);
- if (fatal_signal_pending(current))
- result = -EINTR;
- if (time_after(jiffies, timeout)) {
- dev_err(&dev->pci_dev->dev,
- "Device not ready; aborting initialisation\n");
- result = -ENODEV;
- }
- }
-
- if (result) {
- nvme_free_queue_mem(nvmeq);
- return result;
- }
+ result = nvme_enable_ctrl(dev, cap);
+ if (result)
+ goto free_q;
result = queue_request_irq(dev, nvmeq, "nvme admin");
+ if (result)
+ goto free_q;
+
dev->queues[0] = nvmeq;
return result;
+
+ free_q:
+ nvme_free_queue_mem(nvmeq);
+ return result;
}
-static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
unsigned long addr, unsigned length)
{
int i, err, count, nents, offset;
@@ -1130,7 +1252,7 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
return ERR_PTR(err);
}
-static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
struct nvme_iod *iod)
{
int i;
@@ -1148,13 +1270,19 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
struct nvme_queue *nvmeq;
struct nvme_user_io io;
struct nvme_command c;
- unsigned length;
- int status;
- struct nvme_iod *iod;
+ unsigned length, meta_len;
+ int status, i;
+ struct nvme_iod *iod, *meta_iod = NULL;
+ dma_addr_t meta_dma_addr;
+ void *meta, *uninitialized_var(meta_mem);
if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT;
length = (io.nblocks + 1) << ns->lba_shift;
+ meta_len = (io.nblocks + 1) * ns->ms;
+
+ if (meta_len && ((io.metadata & 3) || !io.metadata))
+ return -EINVAL;
switch (io.opcode) {
case nvme_cmd_write:
@@ -1176,11 +1304,42 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.slba = cpu_to_le64(io.slba);
c.rw.length = cpu_to_le16(io.nblocks);
c.rw.control = cpu_to_le16(io.control);
- c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
- c.rw.reftag = io.reftag;
- c.rw.apptag = io.apptag;
- c.rw.appmask = io.appmask;
- /* XXX: metadata */
+ c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
+ c.rw.reftag = cpu_to_le32(io.reftag);
+ c.rw.apptag = cpu_to_le16(io.apptag);
+ c.rw.appmask = cpu_to_le16(io.appmask);
+
+ if (meta_len) {
+ meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, meta_len);
+ if (IS_ERR(meta_iod)) {
+ status = PTR_ERR(meta_iod);
+ meta_iod = NULL;
+ goto unmap;
+ }
+
+ meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
+ &meta_dma_addr, GFP_KERNEL);
+ if (!meta_mem) {
+ status = -ENOMEM;
+ goto unmap;
+ }
+
+ if (io.opcode & 1) {
+ int meta_offset = 0;
+
+ for (i = 0; i < meta_iod->nents; i++) {
+ meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
+ meta_iod->sg[i].offset;
+ memcpy(meta_mem + meta_offset, meta,
+ meta_iod->sg[i].length);
+ kunmap_atomic(meta);
+ meta_offset += meta_iod->sg[i].length;
+ }
+ }
+
+ c.rw.metadata = cpu_to_le64(meta_dma_addr);
+ }
+
length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
nvmeq = get_nvmeq(dev);
@@ -1196,8 +1355,33 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
else
status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+ if (meta_len) {
+ if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
+ int meta_offset = 0;
+
+ for (i = 0; i < meta_iod->nents; i++) {
+ meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
+ meta_iod->sg[i].offset;
+ memcpy(meta, meta_mem + meta_offset,
+ meta_iod->sg[i].length);
+ kunmap_atomic(meta);
+ meta_offset += meta_iod->sg[i].length;
+ }
+ }
+
+ dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
+ meta_dma_addr);
+ }
+
+ unmap:
nvme_unmap_user_pages(dev, io.opcode & 1, iod);
nvme_free_iod(dev, iod);
+
+ if (meta_iod) {
+ nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
+ nvme_free_iod(dev, meta_iod);
+ }
+
return status;
}
@@ -1208,6 +1392,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
struct nvme_command c;
int status, length;
struct nvme_iod *uninitialized_var(iod);
+ unsigned timeout;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -1237,10 +1422,13 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
GFP_KERNEL);
}
+ timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
+ ADMIN_TIMEOUT;
if (length != cmd.data_len)
status = -ENOMEM;
else
- status = nvme_submit_admin_cmd(dev, &c, &cmd.result);
+ status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
+ timeout);
if (cmd.data_len) {
nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
@@ -1266,6 +1454,10 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, (void __user *)arg);
+ case SG_GET_VERSION_NUM:
+ return nvme_sg_get_version_num((void __user *)arg);
+ case SG_IO:
+ return nvme_sg_io(ns, (void __user *)arg);
default:
return -ENOTTY;
}
@@ -1282,13 +1474,17 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
while (bio_list_peek(&nvmeq->sq_cong)) {
struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+
+ if (bio_list_empty(&nvmeq->sq_cong))
+ remove_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
bio_list_add_head(&nvmeq->sq_cong, bio);
break;
}
- if (bio_list_empty(&nvmeq->sq_cong))
- remove_wait_queue(&nvmeq->sq_full,
- &nvmeq->sq_cong_wait);
}
}
@@ -1297,7 +1493,7 @@ static int nvme_kthread(void *data)
struct nvme_dev *dev;
while (!kthread_should_stop()) {
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_INTERRUPTIBLE);
spin_lock(&dev_list_lock);
list_for_each_entry(dev, &dev_list, node) {
int i;
@@ -1314,8 +1510,7 @@ static int nvme_kthread(void *data)
}
}
spin_unlock(&dev_list_lock);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
+ schedule_timeout(round_jiffies_relative(HZ));
}
return 0;
}
@@ -1347,6 +1542,16 @@ static void nvme_put_ns_idx(int index)
spin_unlock(&dev_list_lock);
}
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+ u32 logical_block_size = queue_logical_block_size(ns->queue);
+ ns->queue->limits.discard_zeroes_data = 0;
+ ns->queue->limits.discard_alignment = logical_block_size;
+ ns->queue->limits.discard_granularity = logical_block_size;
+ ns->queue->limits.max_discard_sectors = 0xffffffff;
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
@@ -1366,7 +1571,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
-/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
blk_queue_make_request(ns->queue, nvme_make_request);
ns->dev = dev;
ns->queue->queuedata = ns;
@@ -1378,6 +1582,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
ns->disk = disk;
lbaf = id->flbas & 0xf;
ns->lba_shift = id->lbaf[lbaf].ds;
+ ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
if (dev->max_hw_sectors)
blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
@@ -1392,6 +1597,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+ if (dev->oncs & NVME_CTRL_ONCS_DSM)
+ nvme_config_discard(ns);
+
return ns;
out_free_queue:
@@ -1496,14 +1704,21 @@ static void nvme_free_queues(struct nvme_dev *dev)
nvme_free_queue(dev, i);
}
+/*
+ * Return: error value if an error occurred setting up the queues or calling
+ * Identify Device. 0 if these succeeded, even if adding some of the
+ * namespaces failed. At the moment, these failures are silent. TBD which
+ * failures should be reported.
+ */
static int nvme_dev_add(struct nvme_dev *dev)
{
int res, nn, i;
- struct nvme_ns *ns, *next;
+ struct nvme_ns *ns;
struct nvme_id_ctrl *ctrl;
struct nvme_id_ns *id_ns;
void *mem;
dma_addr_t dma_addr;
+ int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
res = nvme_setup_io_queues(dev);
if (res)
@@ -1511,22 +1726,26 @@ static int nvme_dev_add(struct nvme_dev *dev)
mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
res = nvme_identify(dev, 0, 1, dma_addr);
if (res) {
res = -EIO;
- goto out_free;
+ goto out;
}
ctrl = mem;
nn = le32_to_cpup(&ctrl->nn);
+ dev->oncs = le16_to_cpup(&ctrl->oncs);
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
- if (ctrl->mdts) {
- int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+ if (ctrl->mdts)
dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
- }
+ if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
+ (dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+ dev->stripe_size = 1 << (ctrl->vs[3] + shift);
id_ns = mem;
for (i = 1; i <= nn; i++) {
@@ -1548,14 +1767,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
}
list_for_each_entry(ns, &dev->namespaces, list)
add_disk(ns->disk);
-
- goto out;
-
- out_free:
- list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
- list_del(&ns->list);
- nvme_ns_free(ns);
- }
+ res = 0;
out:
dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
@@ -1634,6 +1846,56 @@ static void nvme_release_instance(struct nvme_dev *dev)
spin_unlock(&dev_list_lock);
}
+static void nvme_free_dev(struct kref *kref)
+{
+ struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
+ nvme_dev_remove(dev);
+ pci_disable_msix(dev->pci_dev);
+ iounmap(dev->bar);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
+ pci_disable_device(dev->pci_dev);
+ pci_release_regions(dev->pci_dev);
+ kfree(dev->queues);
+ kfree(dev->entry);
+ kfree(dev);
+}
+
+static int nvme_dev_open(struct inode *inode, struct file *f)
+{
+ struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
+ miscdev);
+ kref_get(&dev->kref);
+ f->private_data = dev;
+ return 0;
+}
+
+static int nvme_dev_release(struct inode *inode, struct file *f)
+{
+ struct nvme_dev *dev = f->private_data;
+ kref_put(&dev->kref, nvme_free_dev);
+ return 0;
+}
+
+static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ struct nvme_dev *dev = f->private_data;
+ switch (cmd) {
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_admin_cmd(dev, (void __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations nvme_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = nvme_dev_open,
+ .release = nvme_dev_release,
+ .unlocked_ioctl = nvme_dev_ioctl,
+ .compat_ioctl = nvme_dev_ioctl,
+};
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int bars, result = -ENOMEM;
@@ -1692,8 +1954,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto delete;
+ scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
+ dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ dev->miscdev.parent = &pdev->dev;
+ dev->miscdev.name = dev->name;
+ dev->miscdev.fops = &nvme_dev_fops;
+ result = misc_register(&dev->miscdev);
+ if (result)
+ goto remove;
+
+ kref_init(&dev->kref);
return 0;
+ remove:
+ nvme_dev_remove(dev);
delete:
spin_lock(&dev_list_lock);
list_del(&dev->node);
@@ -1719,16 +1993,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_dev_remove(dev);
- pci_disable_msix(pdev);
- iounmap(dev->bar);
- nvme_release_instance(dev);
- nvme_release_prp_pools(dev);
- pci_disable_device(pdev);
- pci_release_regions(pdev);
- kfree(dev->queues);
- kfree(dev->entry);
- kfree(dev);
+ misc_deregister(&dev->miscdev);
+ kref_put(&dev->kref, nvme_free_dev);
}
/* These functions are yet to be implemented */
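In the nvme-core.c diff above, nvme_map_bio() now splits a bio that would cross a stripe boundary: split_len is the byte distance from the bio's starting sector to the next boundary, computed with a power-of-two mask on stripe_size. A minimal standalone sketch of that arithmetic follows; the sizes are made-up examples, and the real stripe_size comes from the vendor-specific Identify bytes handled in the nvme_dev_add() hunk.

#include <stdint.h>
#include <stdio.h>

/* Bytes left before the next stripe boundary, as computed in nvme_map_bio()
 * above: stripe_size must be a power of two, sectors are 512 bytes. */
static uint64_t bytes_to_stripe_boundary(uint64_t sector, uint64_t stripe_size)
{
	return stripe_size - ((sector << 9) & (stripe_size - 1));
}

int main(void)
{
	/* Illustrative values: a 128 KiB stripe, an I/O starting 4 KiB before
	 * a boundary.  Any bio longer than the result gets split there. */
	uint64_t stripe_size = 128 * 1024;
	uint64_t sector = (128 * 1024 - 4096) >> 9;	/* byte offset 126976 */

	printf("%llu\n", (unsigned long long)
	       bytes_to_stripe_boundary(sector, stripe_size));	/* prints 4096 */
	return 0;
}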
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
new file mode 100644
index 0000000..fed54b0
--- /dev/null
+++ b/drivers/block/nvme-scsi.c
@@ -0,0 +1,3053 @@
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * Refer to the SCSI-NVMe Translation spec for details on how
+ * each command is translated.
+ */
+
+#include <linux/nvme.h>
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kdev_t.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <scsi/sg.h>
+#include <scsi/scsi.h>
+
+
+static int sg_version_num = 30534; /* 2 digits for each component */
+
+#define SNTI_TRANSLATION_SUCCESS 0
+#define SNTI_INTERNAL_ERROR 1
+
+/* VPD Page Codes */
+#define VPD_SUPPORTED_PAGES 0x00
+#define VPD_SERIAL_NUMBER 0x80
+#define VPD_DEVICE_IDENTIFIERS 0x83
+#define VPD_EXTENDED_INQUIRY 0x86
+#define VPD_BLOCK_DEV_CHARACTERISTICS 0xB1
+
+/* CDB offsets */
+#define REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET 6
+#define REPORT_LUNS_SR_OFFSET 2
+#define READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET 10
+#define REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET 4
+#define REQUEST_SENSE_DESC_OFFSET 1
+#define REQUEST_SENSE_DESC_MASK 0x01
+#define DESCRIPTOR_FORMAT_SENSE_DATA_TYPE 1
+#define INQUIRY_EVPD_BYTE_OFFSET 1
+#define INQUIRY_PAGE_CODE_BYTE_OFFSET 2
+#define INQUIRY_EVPD_BIT_MASK 1
+#define INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET 3
+#define START_STOP_UNIT_CDB_IMMED_OFFSET 1
+#define START_STOP_UNIT_CDB_IMMED_MASK 0x1
+#define START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET 3
+#define START_STOP_UNIT_CDB_POWER_COND_MOD_MASK 0xF
+#define START_STOP_UNIT_CDB_POWER_COND_OFFSET 4
+#define START_STOP_UNIT_CDB_POWER_COND_MASK 0xF0
+#define START_STOP_UNIT_CDB_NO_FLUSH_OFFSET 4
+#define START_STOP_UNIT_CDB_NO_FLUSH_MASK 0x4
+#define START_STOP_UNIT_CDB_START_OFFSET 4
+#define START_STOP_UNIT_CDB_START_MASK 0x1
+#define WRITE_BUFFER_CDB_MODE_OFFSET 1
+#define WRITE_BUFFER_CDB_MODE_MASK 0x1F
+#define WRITE_BUFFER_CDB_BUFFER_ID_OFFSET 2
+#define WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET 3
+#define WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET 6
+#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET 1
+#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK 0xC0
+#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT 6
+#define FORMAT_UNIT_CDB_LONG_LIST_OFFSET 1
+#define FORMAT_UNIT_CDB_LONG_LIST_MASK 0x20
+#define FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET 1
+#define FORMAT_UNIT_CDB_FORMAT_DATA_MASK 0x10
+#define FORMAT_UNIT_SHORT_PARM_LIST_LEN 4
+#define FORMAT_UNIT_LONG_PARM_LIST_LEN 8
+#define FORMAT_UNIT_PROT_INT_OFFSET 3
+#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET 0
+#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK 0x07
+#define UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET 7
+
+/* Misc. defines */
+#define NIBBLE_SHIFT 4
+#define FIXED_SENSE_DATA 0x70
+#define DESC_FORMAT_SENSE_DATA 0x72
+#define FIXED_SENSE_DATA_ADD_LENGTH 10
+#define LUN_ENTRY_SIZE 8
+#define LUN_DATA_HEADER_SIZE 8
+#define ALL_LUNS_RETURNED 0x02
+#define ALL_WELL_KNOWN_LUNS_RETURNED 0x01
+#define RESTRICTED_LUNS_RETURNED 0x00
+#define NVME_POWER_STATE_START_VALID 0x00
+#define NVME_POWER_STATE_ACTIVE 0x01
+#define NVME_POWER_STATE_IDLE 0x02
+#define NVME_POWER_STATE_STANDBY 0x03
+#define NVME_POWER_STATE_LU_CONTROL 0x07
+#define POWER_STATE_0 0
+#define POWER_STATE_1 1
+#define POWER_STATE_2 2
+#define POWER_STATE_3 3
+#define DOWNLOAD_SAVE_ACTIVATE 0x05
+#define DOWNLOAD_SAVE_DEFER_ACTIVATE 0x0E
+#define ACTIVATE_DEFERRED_MICROCODE 0x0F
+#define FORMAT_UNIT_IMMED_MASK 0x2
+#define FORMAT_UNIT_IMMED_OFFSET 1
+#define KELVIN_TEMP_FACTOR 273
+#define FIXED_FMT_SENSE_DATA_SIZE 18
+#define DESC_FMT_SENSE_DATA_SIZE 8
+
+/* SCSI/NVMe defines and bit masks */
+#define INQ_STANDARD_INQUIRY_PAGE 0x00
+#define INQ_SUPPORTED_VPD_PAGES_PAGE 0x00
+#define INQ_UNIT_SERIAL_NUMBER_PAGE 0x80
+#define INQ_DEVICE_IDENTIFICATION_PAGE 0x83
+#define INQ_EXTENDED_INQUIRY_DATA_PAGE 0x86
+#define INQ_BDEV_CHARACTERISTICS_PAGE 0xB1
+#define INQ_SERIAL_NUMBER_LENGTH 0x14
+#define INQ_NUM_SUPPORTED_VPD_PAGES 5
+#define VERSION_SPC_4 0x06
+#define ACA_UNSUPPORTED 0
+#define STANDARD_INQUIRY_LENGTH 36
+#define ADDITIONAL_STD_INQ_LENGTH 31
+#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH 0x3C
+#define RESERVED_FIELD 0
+
+/* SCSI READ/WRITE Defines */
+#define IO_CDB_WP_MASK 0xE0
+#define IO_CDB_WP_SHIFT 5
+#define IO_CDB_FUA_MASK 0x8
+#define IO_6_CDB_LBA_OFFSET 0
+#define IO_6_CDB_LBA_MASK 0x001FFFFF
+#define IO_6_CDB_TX_LEN_OFFSET 4
+#define IO_6_DEFAULT_TX_LEN 256
+#define IO_10_CDB_LBA_OFFSET 2
+#define IO_10_CDB_TX_LEN_OFFSET 7
+#define IO_10_CDB_WP_OFFSET 1
+#define IO_10_CDB_FUA_OFFSET 1
+#define IO_12_CDB_LBA_OFFSET 2
+#define IO_12_CDB_TX_LEN_OFFSET 6
+#define IO_12_CDB_WP_OFFSET 1
+#define IO_12_CDB_FUA_OFFSET 1
+#define IO_16_CDB_FUA_OFFSET 1
+#define IO_16_CDB_WP_OFFSET 1
+#define IO_16_CDB_LBA_OFFSET 2
+#define IO_16_CDB_TX_LEN_OFFSET 10
+
+/* Mode Sense/Select defines */
+#define MODE_PAGE_INFO_EXCEP 0x1C
+#define MODE_PAGE_CACHING 0x08
+#define MODE_PAGE_CONTROL 0x0A
+#define MODE_PAGE_POWER_CONDITION 0x1A
+#define MODE_PAGE_RETURN_ALL 0x3F
+#define MODE_PAGE_BLK_DES_LEN 0x08
+#define MODE_PAGE_LLBAA_BLK_DES_LEN 0x10
+#define MODE_PAGE_CACHING_LEN 0x14
+#define MODE_PAGE_CONTROL_LEN 0x0C
+#define MODE_PAGE_POW_CND_LEN 0x28
+#define MODE_PAGE_INF_EXC_LEN 0x0C
+#define MODE_PAGE_ALL_LEN 0x54
+#define MODE_SENSE6_MPH_SIZE 4
+#define MODE_SENSE6_ALLOC_LEN_OFFSET 4
+#define MODE_SENSE_PAGE_CONTROL_OFFSET 2
+#define MODE_SENSE_PAGE_CONTROL_MASK 0xC0
+#define MODE_SENSE_PAGE_CODE_OFFSET 2
+#define MODE_SENSE_PAGE_CODE_MASK 0x3F
+#define MODE_SENSE_LLBAA_OFFSET 1
+#define MODE_SENSE_LLBAA_MASK 0x10
+#define MODE_SENSE_LLBAA_SHIFT 4
+#define MODE_SENSE_DBD_OFFSET 1
+#define MODE_SENSE_DBD_MASK 8
+#define MODE_SENSE_DBD_SHIFT 3
+#define MODE_SENSE10_MPH_SIZE 8
+#define MODE_SENSE10_ALLOC_LEN_OFFSET 7
+#define MODE_SELECT_CDB_PAGE_FORMAT_OFFSET 1
+#define MODE_SELECT_CDB_SAVE_PAGES_OFFSET 1
+#define MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET 4
+#define MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET 7
+#define MODE_SELECT_CDB_PAGE_FORMAT_MASK 0x10
+#define MODE_SELECT_CDB_SAVE_PAGES_MASK 0x1
+#define MODE_SELECT_6_BD_OFFSET 3
+#define MODE_SELECT_10_BD_OFFSET 6
+#define MODE_SELECT_10_LLBAA_OFFSET 4
+#define MODE_SELECT_10_LLBAA_MASK 1
+#define MODE_SELECT_6_MPH_SIZE 4
+#define MODE_SELECT_10_MPH_SIZE 8
+#define CACHING_MODE_PAGE_WCE_MASK 0x04
+#define MODE_SENSE_BLK_DESC_ENABLED 0
+#define MODE_SENSE_BLK_DESC_COUNT 1
+#define MODE_SELECT_PAGE_CODE_MASK 0x3F
+#define SHORT_DESC_BLOCK 8
+#define LONG_DESC_BLOCK 16
+#define MODE_PAGE_POW_CND_LEN_FIELD 0x26
+#define MODE_PAGE_INF_EXC_LEN_FIELD 0x0A
+#define MODE_PAGE_CACHING_LEN_FIELD 0x12
+#define MODE_PAGE_CONTROL_LEN_FIELD 0x0A
+#define MODE_SENSE_PC_CURRENT_VALUES 0
+
+/* Log Sense defines */
+#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE 0x00
+#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH 0x07
+#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE 0x2F
+#define LOG_PAGE_TEMPERATURE_PAGE 0x0D
+#define LOG_SENSE_CDB_SP_OFFSET 1
+#define LOG_SENSE_CDB_SP_NOT_ENABLED 0
+#define LOG_SENSE_CDB_PC_OFFSET 2
+#define LOG_SENSE_CDB_PC_MASK 0xC0
+#define LOG_SENSE_CDB_PC_SHIFT 6
+#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES 1
+#define LOG_SENSE_CDB_PAGE_CODE_MASK 0x3F
+#define LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET 7
+#define REMAINING_INFO_EXCP_PAGE_LENGTH 0x8
+#define LOG_INFO_EXCP_PAGE_LENGTH 0xC
+#define REMAINING_TEMP_PAGE_LENGTH 0xC
+#define LOG_TEMP_PAGE_LENGTH 0x10
+#define LOG_TEMP_UNKNOWN 0xFF
+#define SUPPORTED_LOG_PAGES_PAGE_LENGTH 0x3
+
+/* Read Capacity defines */
+#define READ_CAP_10_RESP_SIZE 8
+#define READ_CAP_16_RESP_SIZE 32
+
+/* NVMe Namespace and Command Defines */
+#define NVME_GET_SMART_LOG_PAGE 0x02
+#define NVME_GET_FEAT_TEMP_THRESH 0x04
+#define BYTES_TO_DWORDS 4
+#define NVME_MAX_FIRMWARE_SLOT 7
+
+/* Report LUNs defines */
+#define REPORT_LUNS_FIRST_LUN_OFFSET 8
+
+/* SCSI ADDITIONAL SENSE Codes */
+
+#define SCSI_ASC_NO_SENSE 0x00
+#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT 0x03
+#define SCSI_ASC_LUN_NOT_READY 0x04
+#define SCSI_ASC_WARNING 0x0B
+#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED 0x10
+#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED 0x10
+#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED 0x10
+#define SCSI_ASC_UNRECOVERED_READ_ERROR 0x11
+#define SCSI_ASC_MISCOMPARE_DURING_VERIFY 0x1D
+#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID 0x20
+#define SCSI_ASC_ILLEGAL_COMMAND 0x20
+#define SCSI_ASC_ILLEGAL_BLOCK 0x21
+#define SCSI_ASC_INVALID_CDB 0x24
+#define SCSI_ASC_INVALID_LUN 0x25
+#define SCSI_ASC_INVALID_PARAMETER 0x26
+#define SCSI_ASC_FORMAT_COMMAND_FAILED 0x31
+#define SCSI_ASC_INTERNAL_TARGET_FAILURE 0x44
+
+/* SCSI ADDITIONAL SENSE Code Qualifiers */
+
+#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE 0x00
+#define SCSI_ASCQ_FORMAT_COMMAND_FAILED 0x01
+#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED 0x01
+#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED 0x02
+#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED 0x03
+#define SCSI_ASCQ_FORMAT_IN_PROGRESS 0x04
+#define SCSI_ASCQ_POWER_LOSS_EXPECTED 0x08
+#define SCSI_ASCQ_INVALID_LUN_ID 0x09
+
+/**
+ * DEVICE_SPECIFIC_PARAMETER in mode parameter header (see sbc2r16) to
+ * enable DPOFUA support type 0x10 value.
+ */
+#define DEVICE_SPECIFIC_PARAMETER 0
+#define VPD_ID_DESCRIPTOR_LENGTH sizeof(VPD_IDENTIFICATION_DESCRIPTOR)
+
+/* MACROs to extract information from CDBs */
+
+#define GET_OPCODE(cdb) cdb[0]
+
+#define GET_U8_FROM_CDB(cdb, index) (cdb[index] << 0)
+
+#define GET_U16_FROM_CDB(cdb, index) ((cdb[index] << 8) | (cdb[index + 1] << 0))
+
+#define GET_U24_FROM_CDB(cdb, index) ((cdb[index] << 16) | \
+(cdb[index + 1] << 8) | \
+(cdb[index + 2] << 0))
+
+#define GET_U32_FROM_CDB(cdb, index) ((cdb[index] << 24) | \
+(cdb[index + 1] << 16) | \
+(cdb[index + 2] << 8) | \
+(cdb[index + 3] << 0))
+
+#define GET_U64_FROM_CDB(cdb, index) ((((u64)cdb[index]) << 56) | \
+(((u64)cdb[index + 1]) << 48) | \
+(((u64)cdb[index + 2]) << 40) | \
+(((u64)cdb[index + 3]) << 32) | \
+(((u64)cdb[index + 4]) << 24) | \
+(((u64)cdb[index + 5]) << 16) | \
+(((u64)cdb[index + 6]) << 8) | \
+(((u64)cdb[index + 7]) << 0))
+
+/* Inquiry Helper Macros */
+#define GET_INQ_EVPD_BIT(cdb) \
+((GET_U8_FROM_CDB(cdb, INQUIRY_EVPD_BYTE_OFFSET) & \
+INQUIRY_EVPD_BIT_MASK) ? 1 : 0)
+
+#define GET_INQ_PAGE_CODE(cdb) \
+(GET_U8_FROM_CDB(cdb, INQUIRY_PAGE_CODE_BYTE_OFFSET))
+
+#define GET_INQ_ALLOC_LENGTH(cdb) \
+(GET_U16_FROM_CDB(cdb, INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET))
+
+/* Report LUNs Helper Macros */
+#define GET_REPORT_LUNS_ALLOC_LENGTH(cdb) \
+(GET_U32_FROM_CDB(cdb, REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET))
+
+/* Read Capacity Helper Macros */
+#define GET_READ_CAP_16_ALLOC_LENGTH(cdb) \
+(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET))
+
+#define IS_READ_CAP_16(cdb) \
+((cdb[0] == SERVICE_ACTION_IN && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)
+
+/* Request Sense Helper Macros */
+#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \
+(GET_U8_FROM_CDB(cdb, REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET))
+
+/* Mode Sense Helper Macros */
+#define GET_MODE_SENSE_DBD(cdb) \
+((GET_U8_FROM_CDB(cdb, MODE_SENSE_DBD_OFFSET) & MODE_SENSE_DBD_MASK) >> \
+MODE_SENSE_DBD_SHIFT)
+
+#define GET_MODE_SENSE_LLBAA(cdb) \
+((GET_U8_FROM_CDB(cdb, MODE_SENSE_LLBAA_OFFSET) & \
+MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT)
+
+#define GET_MODE_SENSE_MPH_SIZE(cdb10) \
+(cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE)
+
+
+/* Struct to gather data that needs to be extracted from a SCSI CDB.
+ Not conforming to any particular CDB variant, but compatible with all. */
+
+struct nvme_trans_io_cdb {
+ u8 fua;
+ u8 prot_info;
+ u64 lba;
+ u32 xfer_len;
+};
+
+
+/* Internal Helper Functions */
+
+
+/* Copy data to userspace memory */
+
+static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
+ unsigned long n)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ unsigned long not_copied;
+ int i;
+ void *index = from;
+ size_t remaining = n;
+ size_t xfer_len;
+
+ if (hdr->iovec_count > 0) {
+ struct sg_iovec sgl;
+
+ for (i = 0; i < hdr->iovec_count; i++) {
+ not_copied = copy_from_user(&sgl, hdr->dxferp +
+ i * sizeof(struct sg_iovec),
+ sizeof(struct sg_iovec));
+ if (not_copied)
+ return -EFAULT;
+ xfer_len = min(remaining, sgl.iov_len);
+ not_copied = copy_to_user(sgl.iov_base, index,
+ xfer_len);
+ if (not_copied) {
+ res = -EFAULT;
+ break;
+ }
+ index += xfer_len;
+ remaining -= xfer_len;
+ if (remaining == 0)
+ break;
+ }
+ return res;
+ }
+ not_copied = copy_to_user(hdr->dxferp, from, n);
+ if (not_copied)
+ res = -EFAULT;
+ return res;
+}
+
+/* Copy data from userspace memory */
+
+static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
+ unsigned long n)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ unsigned long not_copied;
+ int i;
+ void *index = to;
+ size_t remaining = n;
+ size_t xfer_len;
+
+ if (hdr->iovec_count > 0) {
+ struct sg_iovec sgl;
+
+ for (i = 0; i < hdr->iovec_count; i++) {
+ not_copied = copy_from_user(&sgl, hdr->dxferp +
+ i * sizeof(struct sg_iovec),
+ sizeof(struct sg_iovec));
+ if (not_copied)
+ return -EFAULT;
+ xfer_len = min(remaining, sgl.iov_len);
+ not_copied = copy_from_user(index, sgl.iov_base,
+ xfer_len);
+ if (not_copied) {
+ res = -EFAULT;
+ break;
+ }
+ index += xfer_len;
+ remaining -= xfer_len;
+ if (remaining == 0)
+ break;
+ }
+ return res;
+ }
+
+ not_copied = copy_from_user(to, hdr->dxferp, n);
+ if (not_copied)
+ res = -EFAULT;
+ return res;
+}
+
+/* Status/Sense Buffer Writeback */
+
+static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
+ u8 asc, u8 ascq)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 xfer_len;
+ u8 resp[DESC_FMT_SENSE_DATA_SIZE];
+
+ if (scsi_status_is_good(status)) {
+ hdr->status = SAM_STAT_GOOD;
+ hdr->masked_status = GOOD;
+ hdr->host_status = DID_OK;
+ hdr->driver_status = DRIVER_OK;
+ hdr->sb_len_wr = 0;
+ } else {
+ hdr->status = status;
+ hdr->masked_status = status >> 1;
+ hdr->host_status = DID_OK;
+ hdr->driver_status = DRIVER_OK;
+
+ memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
+ resp[0] = DESC_FORMAT_SENSE_DATA;
+ resp[1] = sense_key;
+ resp[2] = asc;
+ resp[3] = ascq;
+
+ xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
+ hdr->sb_len_wr = xfer_len;
+ if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
+ res = -EFAULT;
+ }
+
+ return res;
+}
+
+static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
+{
+ u8 status, sense_key, asc, ascq;
+ int res = SNTI_TRANSLATION_SUCCESS;
+
+ /* For non-nvme (Linux) errors, simply return the error code */
+ if (nvme_sc < 0)
+ return nvme_sc;
+
+ /* Mask DNR, More, and reserved fields */
+ nvme_sc &= 0x7FF;
+
+ switch (nvme_sc) {
+ /* Generic Command Status */
+ case NVME_SC_SUCCESS:
+ status = SAM_STAT_GOOD;
+ sense_key = NO_SENSE;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_INVALID_OPCODE:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_ILLEGAL_COMMAND;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_INVALID_FIELD:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_INVALID_CDB;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_DATA_XFER_ERROR:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_POWER_LOSS:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_WARNING;
+ ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
+ break;
+ case NVME_SC_INTERNAL:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = HARDWARE_ERROR;
+ asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_ABORT_REQ:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_ABORT_QUEUE:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_FUSED_FAIL:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_FUSED_MISSING:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_INVALID_NS:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
+ ascq = SCSI_ASCQ_INVALID_LUN_ID;
+ break;
+ case NVME_SC_LBA_RANGE:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_ILLEGAL_BLOCK;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_CAP_EXCEEDED:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_NS_NOT_READY:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = NOT_READY;
+ asc = SCSI_ASC_LUN_NOT_READY;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+
+ /* Command Specific Status */
+ case NVME_SC_INVALID_FORMAT:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
+ ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
+ break;
+ case NVME_SC_BAD_ATTRIBUTES:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_INVALID_CDB;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+
+ /* Media Errors */
+ case NVME_SC_WRITE_FAULT:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_READ_ERROR:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_GUARD_CHECK:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
+ ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
+ break;
+ case NVME_SC_APPTAG_CHECK:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
+ ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
+ break;
+ case NVME_SC_REFTAG_CHECK:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
+ ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
+ break;
+ case NVME_SC_COMPARE_FAILED:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MISCOMPARE;
+ asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_ACCESS_DENIED:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
+ ascq = SCSI_ASCQ_INVALID_LUN_ID;
+ break;
+
+ /* Unspecified/Default */
+ case NVME_SC_CMDID_CONFLICT:
+ case NVME_SC_CMD_SEQ_ERROR:
+ case NVME_SC_CQ_INVALID:
+ case NVME_SC_QID_INVALID:
+ case NVME_SC_QUEUE_SIZE:
+ case NVME_SC_ABORT_LIMIT:
+ case NVME_SC_ABORT_MISSING:
+ case NVME_SC_ASYNC_LIMIT:
+ case NVME_SC_FIRMWARE_SLOT:
+ case NVME_SC_FIRMWARE_IMAGE:
+ case NVME_SC_INVALID_VECTOR:
+ case NVME_SC_INVALID_LOG_PAGE:
+ default:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ }
+
+ res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);
+
+ return res;
+}
+
+/* INQUIRY Helper Functions */
+
+static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *inq_response,
+ int alloc_len)
+{
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ int xfer_len;
+ u8 resp_data_format = 0x02;
+ u8 protect;
+ u8 cmdque = 0x01 << 1;
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* nvme ns identify - use DPS value for PROTECT field */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ /*
+ * If nvme_sc was -ve, res will be -ve here.
+ * If nvme_sc was +ve, the status will have been translated, and res
+ * can only be 0 or -ve.
+ * - If 0 && nvme_sc > 0, then go into next if where res gets nvme_sc
+ * - If -ve, return because it's a Linux error.
+ */
+ if (res)
+ goto out_free;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_free;
+ }
+ id_ns = mem;
+ protect = id_ns->dps ? 0x01 : 0;
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[2] = VERSION_SPC_4;
+ inq_response[3] = resp_data_format; /* normaca=0 | hisup=0 */
+ inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
+ inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
+ inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */
+ strncpy(&inq_response[8], "NVMe ", 8);
+ strncpy(&inq_response[16], dev->model, 16);
+ strncpy(&inq_response[32], dev->firmware_rev, 4);
+
+ xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ out_free:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out_dma:
+ return res;
+}
+
+static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *inq_response,
+ int alloc_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE; /* Page Code */
+ inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES; /* Page Length */
+ inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
+ inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
+ inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
+ inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
+ inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
+
+ xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ return res;
+}
+
+static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *inq_response,
+ int alloc_len)
+{
+ struct nvme_dev *dev = ns->dev;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
+ inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */
+ strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);
+
+ xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ return res;
+}
+
+static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *inq_response, int alloc_len)
+{
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ctrl *id_ctrl;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ u8 ieee[4];
+ int xfer_len;
+ __be32 tmp_id = cpu_to_be32(ns->ns_id);
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* nvme controller identify */
+ nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_free;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_free;
+ }
+ id_ctrl = mem;
+
+ /* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
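+ /*
+  * The 4-bit NAA field pushes the 24-bit IEEE OUI off a byte boundary,
+  * so each OUI byte is split across two response bytes below.
+  */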
+ ieee[0] = id_ctrl->ieee[0] << 4;
+ ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
+ ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
+ ieee[3] = id_ctrl->ieee[2] >> 4;
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */
+ inq_response[3] = 20; /* Page Length */
+ /* Designation Descriptor start */
+ inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */
+ inq_response[5] = 0x03; /* PIV=0b | Asso=00b | Designator Type=3h */
+ inq_response[6] = 0x00; /* Rsvd */
+ inq_response[7] = 16; /* Designator Length */
+ /* Designator start */
+ inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
+ inq_response[9] = ieee[2]; /* IEEE ID */
+ inq_response[10] = ieee[1]; /* IEEE ID */
+ inq_response[11] = ieee[0]; /* IEEE ID| Vendor Specific ID... */
+ inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
+ inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
+ inq_response[14] = dev->serial[0];
+ inq_response[15] = dev->serial[1];
+ inq_response[16] = dev->model[0];
+ inq_response[17] = dev->model[1];
+ memcpy(&inq_response[18], &tmp_id, sizeof(u32));
+ /* Last 2 bytes are zero */
+
+ xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ out_free:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out_dma:
+ return res;
+}
+
+static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ int alloc_len)
+{
+ u8 *inq_response;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ctrl *id_ctrl;
+ struct nvme_id_ns *id_ns;
+ int xfer_len;
+ u8 microcode = 0x80;
+ u8 spt;
+ u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
+ u8 grd_chk, app_chk, ref_chk, protect;
+ u8 uask_sup = 0x20;
+ u8 v_sup;
+ u8 luiclr = 0x01;
+
+ inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
+ if (inq_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_free;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_free;
+ }
+ id_ns = mem;
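+ /*
+  * Map the NVMe DPC protection-type support bits onto the SCSI SPT
+  * field encoding (byte 4, bits 5:3, via spt_lut above).
+  */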
+ spt = spt_lut[(id_ns->dpc) & 0x07] << 3;
+ protect = id_ns->dps ? 0x01 : 0;
+ grd_chk = protect << 2;
+ app_chk = protect << 1;
+ ref_chk = protect;
+
+ /* nvme controller identify */
+ nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_free;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_free;
+ }
+ id_ctrl = mem;
+ v_sup = id_ctrl->vwc;
+
+ memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+ inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE; /* Page Code */
+ inq_response[2] = 0x00; /* Page Length MSB */
+ inq_response[3] = 0x3C; /* Page Length LSB */
+ inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
+ inq_response[5] = uask_sup;
+ inq_response[6] = v_sup;
+ inq_response[7] = luiclr;
+ inq_response[8] = 0;
+ inq_response[9] = 0;
+
+ xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ out_free:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out_dma:
+ kfree(inq_response);
+ out_mem:
+ return res;
+}
+
+static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ int alloc_len)
+{
+ u8 *inq_response;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+
+ inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
+ if (inq_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+
+ memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+ inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE; /* Page Code */
+ inq_response[2] = 0x00; /* Page Length MSB */
+ inq_response[3] = 0x3C; /* Page Length LSB */
+ inq_response[4] = 0x00; /* Medium Rotation Rate MSB */
+ inq_response[5] = 0x01; /* Medium Rotation Rate LSB */
+ inq_response[6] = 0x00; /* Form Factor */
+
+ xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ kfree(inq_response);
+ out_mem:
+ return res;
+}
+
+/* LOG SENSE Helper Functions */
+
+static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ int alloc_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+ u8 *log_response;
+
+ log_response = kmalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
+ if (log_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+ memset(log_response, 0, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
+
+ log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
+ /* Subpage=0x00, Page Length MSB=0 */
+ log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
+ log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
+ log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
+ log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;
+
+ xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
+
+ kfree(log_response);
+ out_mem:
+ return res;
+}
+
+static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, int alloc_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+ u8 *log_response;
+ struct nvme_command c;
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_smart_log *smart_log;
+ dma_addr_t dma_addr;
+ void *mem;
+ u8 temp_c;
+ u16 temp_k;
+
+ log_response = kmalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
+ if (log_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+ memset(log_response, 0, LOG_INFO_EXCP_PAGE_LENGTH);
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_smart_log),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* Get SMART Log Page */
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_admin_get_log_page;
+ c.common.nsid = cpu_to_le32(0xFFFFFFFF);
+ c.common.prp1 = cpu_to_le64(dma_addr);
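+ /* cdw10: Number of Dwords (bits 27:16) | Log Page Identifier (bits 07:00) */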
+ c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
+ BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
+ res = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (res != NVME_SC_SUCCESS) {
+ temp_c = LOG_TEMP_UNKNOWN;
+ } else {
+ smart_log = mem;
+ temp_k = (smart_log->temperature[1] << 8) +
+ (smart_log->temperature[0]);
+ temp_c = temp_k - KELVIN_TEMP_FACTOR;
+ }
+
+ log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
+ /* Subpage=0x00, Page Length MSB=0 */
+ log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
+ /* Informational Exceptions Log Parameter 1 Start */
+ /* Parameter Code=0x0000 bytes 4,5 */
+ log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
+ log_response[7] = 0x04; /* PARAMETER LENGTH */
+ /* Add sense Code and qualifier = 0x00 each */
+ /* Use Temperature from NVMe Get Log Page, convert to C from K */
+ log_response[10] = temp_c;
+
+ xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
+
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+ mem, dma_addr);
+ out_dma:
+ kfree(log_response);
+ out_mem:
+ return res;
+}
+
+static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ int alloc_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+ u8 *log_response;
+ struct nvme_command c;
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_smart_log *smart_log;
+ dma_addr_t dma_addr;
+ void *mem;
+ u32 feature_resp;
+ u8 temp_c_cur, temp_c_thresh;
+ u16 temp_k;
+
+ log_response = kmalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
+ if (log_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+ memset(log_response, 0, LOG_TEMP_PAGE_LENGTH);
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_smart_log),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* Get SMART Log Page */
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_admin_get_log_page;
+ c.common.nsid = cpu_to_le32(0xFFFFFFFF);
+ c.common.prp1 = cpu_to_le64(dma_addr);
+ c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
+ BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
+ res = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (res != NVME_SC_SUCCESS) {
+ temp_c_cur = LOG_TEMP_UNKNOWN;
+ } else {
+ smart_log = mem;
+ temp_k = (smart_log->temperature[1] << 8) +
+ (smart_log->temperature[0]);
+ temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
+ }
+
+ /* Get Features for Temp Threshold */
+ res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
+ &feature_resp);
+ if (res != NVME_SC_SUCCESS)
+ temp_c_thresh = LOG_TEMP_UNKNOWN;
+ else
+ temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;
+
+ log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
+ /* Subpage=0x00, Page Length MSB=0 */
+ log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
+ /* Temperature Log Parameter 1 (Temperature) Start */
+ /* Parameter Code = 0x0000 */
+ log_response[6] = 0x01; /* Format and Linking = 01b */
+ log_response[7] = 0x02; /* Parameter Length */
+ /* Use Temperature from NVMe Get Log Page, convert to C from K */
+ log_response[9] = temp_c_cur;
+ /* Temperature Log Parameter 2 (Reference Temperature) Start */
+ log_response[11] = 0x01; /* Parameter Code = 0x0001 */
+ log_response[12] = 0x01; /* Format and Linking = 01b */
+ log_response[13] = 0x02; /* Parameter Length */
+ /* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
+ log_response[15] = temp_c_thresh;
+
+ xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
+
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+ mem, dma_addr);
+ out_dma:
+ kfree(log_response);
+ out_mem:
+ return res;
+}
+
+/* MODE SENSE Helper Functions */
+
+static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
+ u16 mode_data_length, u16 blk_desc_len)
+{
+ /* Quick check to make sure I don't stomp on my own memory... */
+ if ((cdb10 && len < 8) || (!cdb10 && len < 4))
+ return SNTI_INTERNAL_ERROR;
+
+ if (cdb10) {
+ resp[0] = (mode_data_length & 0xFF00) >> 8;
+ resp[1] = (mode_data_length & 0x00FF);
+ /* resp[2] and [3] are zero */
+ resp[4] = llbaa;
+ resp[5] = RESERVED_FIELD;
+ resp[6] = (blk_desc_len & 0xFF00) >> 8;
+ resp[7] = (blk_desc_len & 0x00FF);
+ } else {
+ resp[0] = (mode_data_length & 0x00FF);
+ /* resp[1] and [2] are zero */
+ resp[3] = (blk_desc_len & 0x00FF);
+ }
+
+ return SNTI_TRANSLATION_SUCCESS;
+}
+
+static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *resp, int len, u8 llbaa)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ u8 flbas;
+ u32 lba_length;
+
+ if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
+ return SNTI_INTERNAL_ERROR;
+ else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ns = mem;
+ flbas = (id_ns->flbas) & 0x0F;
+ lba_length = (1 << (id_ns->lbaf[flbas].ds));
+
+ if (llbaa == 0) {
+ __be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
+ /* Byte 4 is reserved */
+ __be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);
+
+ memcpy(resp, &tmp_cap, sizeof(u32));
+ memcpy(&resp[4], &tmp_len, sizeof(u32));
+ } else {
+ __be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
+ __be32 tmp_len = cpu_to_be32(lba_length);
+
+ memcpy(resp, &tmp_cap, sizeof(u64));
+ /* Bytes 8, 9, 10, 11 are reserved */
+ memcpy(&resp[12], &tmp_len, sizeof(u32));
+ }
+
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out:
+ return res;
+}
+
+static int nvme_trans_fill_control_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *resp,
+ int len)
+{
+ if (len < MODE_PAGE_CONTROL_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ resp[0] = MODE_PAGE_CONTROL;
+ resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
+ resp[2] = 0x0E; /* TST=000b, TMF_ONLY=0, DPICZ=1,
+ * D_SENSE=1, GLTSD=1, RLEC=0 */
+ resp[3] = 0x12; /* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */
+ /* Byte 4: VS=0, RAC=0, UA_INT=0, SWP=0 */
+ resp[5] = 0x40; /* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */
+ /* resp[6] and [7] are obsolete, thus zero */
+ resp[8] = 0xFF; /* Busy timeout period = 0xffff */
+ resp[9] = 0xFF;
+ /* Bytes 10,11: Extended selftest completion time = 0x0000 */
+
+ return SNTI_TRANSLATION_SUCCESS;
+}
+
+static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr,
+ u8 *resp, int len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ u32 feature_resp;
+ u8 vwc;
+
+ if (len < MODE_PAGE_CACHING_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
+ &feature_resp);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out;
+ }
+ vwc = feature_resp & 0x00000001;
+
+ resp[0] = MODE_PAGE_CACHING;
+ resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
+ resp[2] = vwc << 2;
+
+ out:
+ return res;
+}
+
+static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *resp,
+ int len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+
+ if (len < MODE_PAGE_POW_CND_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ resp[0] = MODE_PAGE_POWER_CONDITION;
+ resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
+ /* All other bytes are zero */
+
+ return res;
+}
+
+static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *resp,
+ int len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+
+ if (len < MODE_PAGE_INF_EXC_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ resp[0] = MODE_PAGE_INFO_EXCEP;
+ resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
+ resp[2] = 0x88; /* PERF=1, EBF=0, EWASC=0, DEXCPT=1, TEST=0, LOGERR=0 */
+ /* All other bytes are zero */
+
+ return res;
+}
+
+static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *resp, int len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u16 mode_pages_offset_1 = 0;
+ u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;
+
+ mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
+ mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
+ mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;
+
+ res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
+ MODE_PAGE_CACHING_LEN);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
+ MODE_PAGE_CONTROL_LEN);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
+ MODE_PAGE_POW_CND_LEN);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ res = nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
+ MODE_PAGE_INF_EXC_LEN);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+
+ out:
+ return res;
+}
+
+static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
+{
+ if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
+ /* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */
+ return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
+ } else {
+ return 0;
+ }
+}
+
+static int nvme_trans_mode_page_create(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *cmd,
+ u16 alloc_len, u8 cdb10,
+ int (*mode_page_fill_func)
+ (struct nvme_ns *,
+ struct sg_io_hdr *hdr, u8 *, int),
+ u16 mode_pages_tot_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+ u8 *response;
+ u8 dbd, llbaa;
+ u16 resp_size;
+ int mph_size;
+ u16 mode_pages_offset_1;
+ u16 blk_desc_len, blk_desc_offset, mode_data_length;
+
+ dbd = GET_MODE_SENSE_DBD(cmd);
+ llbaa = GET_MODE_SENSE_LLBAA(cmd);
+ mph_size = GET_MODE_SENSE_MPH_SIZE(cdb10);
+ blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);
+
+ resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
+ /* Refer spc4r34 Table 440 for calculation of Mode data Length field */
+ mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;
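+ /*
+  * e.g. for MODE SENSE(10) the header is 8 bytes and the MODE DATA LENGTH
+  * field excludes its own 2 bytes: 8 - 2 = 6 = 3 + 3 * 1.
+  * For MODE SENSE(6): 4 - 1 = 3.
+  */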
+
+ blk_desc_offset = mph_size;
+ mode_pages_offset_1 = blk_desc_offset + blk_desc_len;
+
+ response = kmalloc(resp_size, GFP_KERNEL);
+ if (response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+ memset(response, 0, resp_size);
+
+ res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
+ llbaa, mode_data_length, blk_desc_len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_free;
+ if (blk_desc_len > 0) {
+ res = nvme_trans_fill_blk_desc(ns, hdr,
+ &response[blk_desc_offset],
+ blk_desc_len, llbaa);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_free;
+ }
+ res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
+ mode_pages_tot_len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_free;
+
+ xfer_len = min(alloc_len, resp_size);
+ res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+ out_free:
+ kfree(response);
+ out_mem:
+ return res;
+}
+
+/* Read Capacity Helper Functions */
+
+static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
+ u8 cdb16)
+{
+ u8 flbas;
+ u32 lba_length;
+ u64 rlba;
+ u8 prot_en;
+ u8 p_type_lut[4] = {0, 0, 1, 2};
+ __be64 tmp_rlba;
+ __be32 tmp_rlba_32;
+ __be32 tmp_len;
+
+ flbas = (id_ns->flbas) & 0x0F;
+ lba_length = (1 << (id_ns->lbaf[flbas].ds));
+ rlba = le64_to_cpup(&id_ns->nsze) - 1;
+ prot_en = id_ns->dps ? 0x01 : 0;
+
+ if (!cdb16) {
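+ /*
+  * If the last LBA does not fit in 32 bits, report 0xFFFFFFFF so the
+  * host knows to retry with READ CAPACITY(16).
+  */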
+ if (rlba > 0xFFFFFFFF)
+ rlba = 0xFFFFFFFF;
+ tmp_rlba_32 = cpu_to_be32(rlba);
+ tmp_len = cpu_to_be32(lba_length);
+ memcpy(response, &tmp_rlba_32, sizeof(u32));
+ memcpy(&response[4], &tmp_len, sizeof(u32));
+ } else {
+ tmp_rlba = cpu_to_be64(rlba);
+ tmp_len = cpu_to_be32(lba_length);
+ memcpy(response, &tmp_rlba, sizeof(u64));
+ memcpy(&response[8], &tmp_len, sizeof(u32));
+ response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
+ /* P_I_Exponent = 0x0 | LBPPBE = 0x0 */
+ /* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */
+ /* Bytes 16-31 - Reserved */
+ }
+}
+
+/* Start Stop Unit Helper Functions */
+
+static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 pc, u8 pcmod, u8 start)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ctrl *id_ctrl;
+ int lowest_pow_st; /* max npss = lowest power consumption */
+ unsigned ps_desired = 0;
+
+ /* NVMe Controller Identify */
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_id_ctrl),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ctrl = mem;
+ lowest_pow_st = id_ctrl->npss - 1;
+
+ switch (pc) {
+ case NVME_POWER_STATE_START_VALID:
+ /* Action unspecified if POWER CONDITION MODIFIER != 0 */
+ if (pcmod == 0 && start == 0x1)
+ ps_desired = POWER_STATE_0;
+ if (pcmod == 0 && start == 0x0)
+ ps_desired = lowest_pow_st;
+ break;
+ case NVME_POWER_STATE_ACTIVE:
+ /* Action unspecified if POWER CONDITION MODIFIER != 0 */
+ if (pcmod == 0)
+ ps_desired = POWER_STATE_0;
+ break;
+ case NVME_POWER_STATE_IDLE:
+ /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
+ /* min of desired state and (lps-1) because lps is STOP */
+ if (pcmod == 0x0)
+ ps_desired = min(POWER_STATE_1, (lowest_pow_st - 1));
+ else if (pcmod == 0x1)
+ ps_desired = min(POWER_STATE_2, (lowest_pow_st - 1));
+ else if (pcmod == 0x2)
+ ps_desired = min(POWER_STATE_3, (lowest_pow_st - 1));
+ break;
+ case NVME_POWER_STATE_STANDBY:
+ /* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
+ if (pcmod == 0x0)
+ ps_desired = max(0, (lowest_pow_st - 2));
+ else if (pcmod == 0x1)
+ ps_desired = max(0, (lowest_pow_st - 1));
+ break;
+ case NVME_POWER_STATE_LU_CONTROL:
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+ nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
+ NULL);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc)
+ res = nvme_sc;
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
+ dma_addr);
+ out:
+ return res;
+}
+
+/* Write Buffer Helper Functions */
+/* Also using this for Format Unit with hdr passed as NULL, and buffer_id, 0 */
+
+static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 opcode, u32 tot_len, u32 offset,
+ u8 buffer_id)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_command c;
+ struct nvme_iod *iod = NULL;
+ unsigned length;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = opcode;
+ if (opcode == nvme_admin_download_fw) {
+ if (hdr->iovec_count > 0) {
+ /* Assuming SGL is not allowed for this command */
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ iod = nvme_map_user_pages(dev, DMA_TO_DEVICE,
+ (unsigned long)hdr->dxferp, tot_len);
+ if (IS_ERR(iod)) {
+ res = PTR_ERR(iod);
+ goto out;
+ }
+ length = nvme_setup_prps(dev, &c.common, iod, tot_len,
+ GFP_KERNEL);
+ if (length != tot_len) {
+ res = -ENOMEM;
+ goto out_unmap;
+ }
+
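+ /* NUMD and OFST are expressed in dwords; NUMD is a 0's based value */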
+ c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
+ c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
+ } else if (opcode == nvme_admin_activate_fw) {
+ u32 cdw10 = buffer_id | NVME_FWACT_REPL_ACTV;
+ c.common.cdw10[0] = cpu_to_le32(cdw10);
+ }
+
+ nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_unmap;
+ if (nvme_sc)
+ res = nvme_sc;
+
+ out_unmap:
+ if (opcode == nvme_admin_download_fw) {
+ nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod);
+ nvme_free_iod(dev, iod);
+ }
+ out:
+ return res;
+}
+
+/* Mode Select Helper Functions */
+
+static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
+ u16 *bd_len, u8 *llbaa)
+{
+ if (cdb10) {
+ /* 10 Byte CDB */
+ *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
+ parm_list[MODE_SELECT_10_BD_OFFSET + 1];
+ *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
+ MODE_SELECT_10_LLBAA_MASK;
+ } else {
+ /* 6 Byte CDB */
+ *bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
+ }
+}
+
+static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
+ u16 idx, u16 bd_len, u8 llbaa)
+{
+ u16 bd_num;
+
+ bd_num = bd_len / ((llbaa == 0) ?
+ SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
+ /* Store block descriptor info if a FORMAT UNIT comes later */
+ /* TODO Saving 1st BD info; what to do if multiple BD received? */
+ if (llbaa == 0) {
+ /* Standard Block Descriptor - spc4r34 7.5.5.1 */
+ ns->mode_select_num_blocks =
+ (parm_list[idx + 1] << 16) +
+ (parm_list[idx + 2] << 8) +
+ (parm_list[idx + 3]);
+
+ ns->mode_select_block_len =
+ (parm_list[idx + 5] << 16) +
+ (parm_list[idx + 6] << 8) +
+ (parm_list[idx + 7]);
+ } else {
+ /* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */
+ ns->mode_select_num_blocks =
+ (((u64)parm_list[idx + 0]) << 56) +
+ (((u64)parm_list[idx + 1]) << 48) +
+ (((u64)parm_list[idx + 2]) << 40) +
+ (((u64)parm_list[idx + 3]) << 32) +
+ (((u64)parm_list[idx + 4]) << 24) +
+ (((u64)parm_list[idx + 5]) << 16) +
+ (((u64)parm_list[idx + 6]) << 8) +
+ ((u64)parm_list[idx + 7]);
+
+ ns->mode_select_block_len =
+ (parm_list[idx + 12] << 24) +
+ (parm_list[idx + 13] << 16) +
+ (parm_list[idx + 14] << 8) +
+ (parm_list[idx + 15]);
+ }
+}
+
+static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *mode_page, u8 page_code)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ unsigned dword11;
+
+ switch (page_code) {
+ case MODE_PAGE_CACHING:
+ dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
+ nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
+ 0, NULL);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ break;
+ if (nvme_sc) {
+ res = nvme_sc;
+ break;
+ }
+ break;
+ case MODE_PAGE_CONTROL:
+ break;
+ case MODE_PAGE_POWER_CONDITION:
+ /* Verify the OS is not trying to set timers */
+ if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_PARAMETER,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ if (!res)
+ res = SNTI_INTERNAL_ERROR;
+ break;
+ }
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ if (!res)
+ res = SNTI_INTERNAL_ERROR;
+ break;
+ }
+
+ return res;
+}
+
+static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd, u16 parm_list_len, u8 pf,
+ u8 sp, u8 cdb10)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 *parm_list;
+ u16 bd_len;
+ u8 llbaa = 0;
+ u16 index, saved_index;
+ u8 page_code;
+ u16 mp_size;
+
+ /* Get parm list from data-in/out buffer */
+ parm_list = kmalloc(parm_list_len, GFP_KERNEL);
+ if (parm_list == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_mem;
+
+ nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
+ index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);
+
+ if (bd_len != 0) {
+ /* Block Descriptors present, parse */
+ nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
+ index += bd_len;
+ }
+ saved_index = index;
+
+ /* Multiple mode pages may be present; iterate through all of them */
+ /* 1st pass: don't issue NVMe commands, only validate the page codes */
+ do {
+ page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
+ mp_size = parm_list[index + 1] + 2;
+ if ((page_code != MODE_PAGE_CACHING) &&
+ (page_code != MODE_PAGE_CONTROL) &&
+ (page_code != MODE_PAGE_POWER_CONDITION)) {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_mem;
+ }
+ index += mp_size;
+ } while (index < parm_list_len);
+
+ /* 2nd pass: issue the NVMe commands */
+ index = saved_index;
+ do {
+ page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
+ mp_size = parm_list[index + 1] + 2;
+ res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
+ page_code);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ break;
+ index += mp_size;
+ } while (index < parm_list_len);
+
+ out_mem:
+ kfree(parm_list);
+ out:
+ return res;
+}
+
+/* Format Unit Helper Functions */
+
+static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ u8 flbas;
+
+ /*
+  * SCSI expects a MODE SELECT to have been issued prior to a
+  * FORMAT UNIT, and that the block size and block count from its
+  * block descriptor be used. If no MODE SELECT has been issued,
+  * FORMAT shall use the current values for both.
+  */
+
+ if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ns = mem;
+
+ if (ns->mode_select_num_blocks == 0)
+ ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
+ if (ns->mode_select_block_len == 0) {
+ flbas = (id_ns->flbas) & 0x0F;
+ ns->mode_select_block_len =
+ (1 << (id_ns->lbaf[flbas].ds));
+ }
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ mem, dma_addr);
+ }
+ out:
+ return res;
+}
+
+static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
+ u8 format_prot_info, u8 *nvme_pf_code)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 *parm_list;
+ u8 pf_usage, pf_code;
+
+ parm_list = kmalloc(len, GFP_KERNEL);
+ if (parm_list == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ res = nvme_trans_copy_from_user(hdr, parm_list, len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_mem;
+
+ if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] &
+ FORMAT_UNIT_IMMED_MASK) != 0) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_mem;
+ }
+
+ if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN &&
+ (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_mem;
+ }
+ pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] &
+ FORMAT_UNIT_PROT_FIELD_USAGE_MASK;
+ pf_code = (pf_usage << 2) | format_prot_info;
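+ /*
+  * pf_code combines PROTECTION FIELD USAGE with FMTPINFO; only the
+  * combinations below map onto an NVMe PI setting (0 = none,
+  * 1..3 = Type 1..3), anything else is rejected as an invalid CDB.
+  */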
+ switch (pf_code) {
+ case 0:
+ *nvme_pf_code = 0;
+ break;
+ case 2:
+ *nvme_pf_code = 1;
+ break;
+ case 3:
+ *nvme_pf_code = 2;
+ break;
+ case 7:
+ *nvme_pf_code = 3;
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+
+ out_mem:
+ kfree(parm_list);
+ out:
+ return res;
+}
+
+static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 prot_info)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ u8 i;
+ u8 flbas, nlbaf;
+ u8 selected_lbaf = 0xFF;
+ u32 cdw10 = 0;
+ struct nvme_command c;
+
+ /* Loop through the LBAFs in id_ns to find the matching LBAF, put it in cdw10 */
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ns = mem;
+ flbas = (id_ns->flbas) & 0x0F;
+ nlbaf = id_ns->nlbaf;
+
+ for (i = 0; i < nlbaf; i++) {
+ if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
+ selected_lbaf = i;
+ break;
+ }
+ }
+ if (selected_lbaf > 0x0F) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ }
+ if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ }
+
+ cdw10 |= prot_info << 5;
+ cdw10 |= selected_lbaf & 0x0F;
+ memset(&c, 0, sizeof(c));
+ c.format.opcode = nvme_admin_format_nvm;
+ c.format.nsid = cpu_to_le32(ns->ns_id);
+ c.format.cdw10 = cpu_to_le32(cdw10);
+
+ nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc)
+ res = nvme_sc;
+
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out:
+ return res;
+}
+
+/* Read/Write Helper Functions */
+
+static inline void nvme_trans_get_io_cdb6(u8 *cmd,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ cdb_info->fua = 0;
+ cdb_info->prot_info = 0;
+ cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_6_CDB_LBA_OFFSET) &
+ IO_6_CDB_LBA_MASK;
+ cdb_info->xfer_len = GET_U8_FROM_CDB(cmd, IO_6_CDB_TX_LEN_OFFSET);
+
+ /* sbc3r27 sec 5.32 - TRANSFER LEN of 0 implies a 256 Block transfer */
+ if (cdb_info->xfer_len == 0)
+ cdb_info->xfer_len = IO_6_DEFAULT_TX_LEN;
+}
+
+static inline void nvme_trans_get_io_cdb10(u8 *cmd,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_10_CDB_FUA_OFFSET) &
+ IO_CDB_FUA_MASK;
+ cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_10_CDB_WP_OFFSET) &
+ IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
+ cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET);
+ cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET);
+}
+
+static inline void nvme_trans_get_io_cdb12(u8 *cmd,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_12_CDB_FUA_OFFSET) &
+ IO_CDB_FUA_MASK;
+ cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_12_CDB_WP_OFFSET) &
+ IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
+ cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_12_CDB_LBA_OFFSET);
+ cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_12_CDB_TX_LEN_OFFSET);
+}
+
+static inline void nvme_trans_get_io_cdb16(u8 *cmd,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_16_CDB_FUA_OFFSET) &
+ IO_CDB_FUA_MASK;
+ cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_16_CDB_WP_OFFSET) &
+ IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
+ cdb_info->lba = GET_U64_FROM_CDB(cmd, IO_16_CDB_LBA_OFFSET);
+ cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_16_CDB_TX_LEN_OFFSET);
+}
+
+static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
+ struct nvme_trans_io_cdb *cdb_info,
+ u32 max_blocks)
+{
+ /* If using iovecs, send one nvme command per vector */
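+ /* Otherwise split a large single buffer into max_blocks-sized commands, rounding up */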
+ if (hdr->iovec_count > 0)
+ return hdr->iovec_count;
+ else if (cdb_info->xfer_len > max_blocks)
+ return ((cdb_info->xfer_len - 1) / max_blocks) + 1;
+ else
+ return 1;
+}
+
+static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ u16 control = 0;
+
+ /* When Protection information support is added, implement here */
+
+ if (cdb_info->fua > 0)
+ control |= NVME_RW_FUA;
+
+ return control;
+}
+
+static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ struct nvme_trans_io_cdb *cdb_info, u8 is_write)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_queue *nvmeq;
+ u32 num_cmds;
+ struct nvme_iod *iod;
+ u64 unit_len;
+ u64 unit_num_blocks; /* Number of blocks to xfer in each nvme cmd */
+ u32 retcode;
+ u32 i = 0;
+ u64 nvme_offset = 0;
+ void __user *next_mapping_addr;
+ struct nvme_command c;
+ u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
+ u16 control;
+ u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors);
+
+ num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
+
+ /*
+ * This loop handles two cases.
+ * First, when an SGL is used in the form of an iovec list:
+ * - Use iov_base as the next mapping address for the nvme command_id
+ * - Use iov_len as the data transfer length for the command.
+ * Second, when we have a single buffer
+ * - If larger than max_blocks, split into chunks, offset
+ * each nvme command accordingly.
+ */
+ for (i = 0; i < num_cmds; i++) {
+ memset(&c, 0, sizeof(c));
+ if (hdr->iovec_count > 0) {
+ struct sg_iovec sgl;
+
+ retcode = copy_from_user(&sgl, hdr->dxferp +
+ i * sizeof(struct sg_iovec),
+ sizeof(struct sg_iovec));
+ if (retcode)
+ return -EFAULT;
+ unit_len = sgl.iov_len;
+ unit_num_blocks = unit_len >> ns->lba_shift;
+ next_mapping_addr = sgl.iov_base;
+ } else {
+ unit_num_blocks = min((u64)max_blocks,
+ (cdb_info->xfer_len - nvme_offset));
+ unit_len = unit_num_blocks << ns->lba_shift;
+ next_mapping_addr = hdr->dxferp +
+ ((1 << ns->lba_shift) * nvme_offset);
+ }
+
+ c.rw.opcode = opcode;
+ c.rw.nsid = cpu_to_le32(ns->ns_id);
+ c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
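+ /* NLB is a 0's based field in NVMe, hence the -1 below */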
+ c.rw.length = cpu_to_le16(unit_num_blocks - 1);
+ control = nvme_trans_io_get_control(ns, cdb_info);
+ c.rw.control = cpu_to_le16(control);
+
+ iod = nvme_map_user_pages(dev,
+ (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ (unsigned long)next_mapping_addr, unit_len);
+ if (IS_ERR(iod)) {
+ res = PTR_ERR(iod);
+ goto out;
+ }
+ retcode = nvme_setup_prps(dev, &c.common, iod, unit_len,
+ GFP_KERNEL);
+ if (retcode != unit_len) {
+ nvme_unmap_user_pages(dev,
+ (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ iod);
+ nvme_free_iod(dev, iod);
+ res = -ENOMEM;
+ goto out;
+ }
+
+ nvme_offset += unit_num_blocks;
+
+ nvmeq = get_nvmeq(dev);
+ /*
+ * Since nvme_submit_sync_cmd sleeps, we can't keep
+ * preemption disabled. We may be preempted at any
+ * point, and be rescheduled to a different CPU. That
+ * will cause cacheline bouncing, but no additional
+ * races since q_lock already protects against other
+ * CPUs.
+ */
+ put_nvmeq(nvmeq);
+ nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
+ NVME_IO_TIMEOUT);
+ if (nvme_sc != NVME_SC_SUCCESS) {
+ nvme_unmap_user_pages(dev,
+ (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ iod);
+ nvme_free_iod(dev, iod);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ goto out;
+ }
+ nvme_unmap_user_pages(dev,
+ (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ iod);
+ nvme_free_iod(dev, iod);
+ }
+ res = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
+
+ out:
+ return res;
+}
+
+
+/* SCSI Command Translation Functions */
+
+static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ struct nvme_trans_io_cdb cdb_info;
+ u8 opcode = cmd[0];
+ u64 xfer_bytes;
+ u64 sum_iov_len = 0;
+ struct sg_iovec sgl;
+ int i;
+ size_t not_copied;
+
+ /* Extract Fields from CDB */
+ switch (opcode) {
+ case WRITE_6:
+ case READ_6:
+ nvme_trans_get_io_cdb6(cmd, &cdb_info);
+ break;
+ case WRITE_10:
+ case READ_10:
+ nvme_trans_get_io_cdb10(cmd, &cdb_info);
+ break;
+ case WRITE_12:
+ case READ_12:
+ nvme_trans_get_io_cdb12(cmd, &cdb_info);
+ break;
+ case WRITE_16:
+ case READ_16:
+ nvme_trans_get_io_cdb16(cmd, &cdb_info);
+ break;
+ default:
+ /* Will never really reach here */
+ res = SNTI_INTERNAL_ERROR;
+ goto out;
+ }
+
+ /* Calculate total length of transfer (in bytes) */
+ if (hdr->iovec_count > 0) {
+ for (i = 0; i < hdr->iovec_count; i++) {
+ not_copied = copy_from_user(&sgl, hdr->dxferp +
+ i * sizeof(struct sg_iovec),
+ sizeof(struct sg_iovec));
+ if (not_copied)
+ return -EFAULT;
+ sum_iov_len += sgl.iov_len;
+ /* IO vector sizes should be multiples of block size */
+ if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_PARAMETER,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ }
+ } else {
+ sum_iov_len = hdr->dxfer_len;
+ }
+
+ /* As per the sg ioctl HOWTO, if the lengths differ, use the lower one */
+ xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);
+
+ /* If block count and actual data buffer size don't match, error out */
+ if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ /* Check for 0 length transfer - it is not illegal */
+ if (cdb_info.xfer_len == 0)
+ goto out;
+
+ /* Send NVMe IO Command(s) */
+ res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+
+ out:
+ return res;
+}
+
+static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 evpd;
+ u8 page_code;
+ int alloc_len;
+ u8 *inq_response;
+
+ evpd = GET_INQ_EVPD_BIT(cmd);
+ page_code = GET_INQ_PAGE_CODE(cmd);
+ alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
+
+ inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL);
+ if (inq_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+
+ if (evpd == 0) {
+ if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
+ res = nvme_trans_standard_inquiry_page(ns, hdr,
+ inq_response, alloc_len);
+ } else {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ }
+ } else {
+ switch (page_code) {
+ case VPD_SUPPORTED_PAGES:
+ res = nvme_trans_supported_vpd_pages(ns, hdr,
+ inq_response, alloc_len);
+ break;
+ case VPD_SERIAL_NUMBER:
+ res = nvme_trans_unit_serial_page(ns, hdr, inq_response,
+ alloc_len);
+ break;
+ case VPD_DEVICE_IDENTIFIERS:
+ res = nvme_trans_device_id_page(ns, hdr, inq_response,
+ alloc_len);
+ break;
+ case VPD_EXTENDED_INQUIRY:
+ res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
+ break;
+ case VPD_BLOCK_DEV_CHARACTERISTICS:
+ res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
+ break;
+ default:
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+ }
+ kfree(inq_response);
+ out_mem:
+ return res;
+}
+
+static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u16 alloc_len;
+ u8 sp;
+ u8 pc;
+ u8 page_code;
+
+ sp = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_SP_OFFSET);
+ if (sp != LOG_SENSE_CDB_SP_NOT_ENABLED) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ pc = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_PC_OFFSET);
+ page_code = pc & LOG_SENSE_CDB_PAGE_CODE_MASK;
+ pc = (pc & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
+ if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ alloc_len = GET_U16_FROM_CDB(cmd, LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET);
+ switch (page_code) {
+ case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
+ res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
+ break;
+ case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
+ res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
+ break;
+ case LOG_PAGE_TEMPERATURE_PAGE:
+ res = nvme_trans_log_temperature(ns, hdr, alloc_len);
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+
+ out:
+ return res;
+}
+
+static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 cdb10 = 0;
+ u16 parm_list_len;
+ u8 page_format;
+ u8 save_pages;
+
+ page_format = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_PAGE_FORMAT_OFFSET);
+ page_format &= MODE_SELECT_CDB_PAGE_FORMAT_MASK;
+
+ save_pages = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_SAVE_PAGES_OFFSET);
+ save_pages &= MODE_SELECT_CDB_SAVE_PAGES_MASK;
+
+ if (GET_OPCODE(cmd) == MODE_SELECT) {
+ parm_list_len = GET_U8_FROM_CDB(cmd,
+ MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET);
+ } else {
+ parm_list_len = GET_U16_FROM_CDB(cmd,
+ MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET);
+ cdb10 = 1;
+ }
+
+ if (parm_list_len != 0) {
+ /*
+ * According to SPC-4 r24, a parameter list length field of 0
+ * shall not be considered an error
+ */
+ res = nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
+ page_format, save_pages, cdb10);
+ }
+
+ return res;
+}
+
+static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u16 alloc_len;
+ u8 cdb10 = 0;
+ u8 page_code;
+ u8 pc;
+
+ if (GET_OPCODE(cmd) == MODE_SENSE) {
+ alloc_len = GET_U8_FROM_CDB(cmd, MODE_SENSE6_ALLOC_LEN_OFFSET);
+ } else {
+ alloc_len = GET_U16_FROM_CDB(cmd,
+ MODE_SENSE10_ALLOC_LEN_OFFSET);
+ cdb10 = 1;
+ }
+
+ pc = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CONTROL_OFFSET) &
+ MODE_SENSE_PAGE_CONTROL_MASK;
+ if (pc != MODE_SENSE_PC_CURRENT_VALUES) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+
+ page_code = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CODE_OFFSET) &
+ MODE_SENSE_PAGE_CODE_MASK;
+ switch (page_code) {
+ case MODE_PAGE_CACHING:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_caching_page,
+ MODE_PAGE_CACHING_LEN);
+ break;
+ case MODE_PAGE_CONTROL:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_control_page,
+ MODE_PAGE_CONTROL_LEN);
+ break;
+ case MODE_PAGE_POWER_CONDITION:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_pow_cnd_page,
+ MODE_PAGE_POW_CND_LEN);
+ break;
+ case MODE_PAGE_INFO_EXCEP:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_inf_exc_page,
+ MODE_PAGE_INF_EXC_LEN);
+ break;
+ case MODE_PAGE_RETURN_ALL:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_all_pages,
+ MODE_PAGE_ALL_LEN);
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+
+ out:
+ return res;
+}
+
+static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ u32 alloc_len = READ_CAP_10_RESP_SIZE;
+ u32 resp_size = READ_CAP_10_RESP_SIZE;
+ u32 xfer_len;
+ u8 cdb16;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ u8 *response;
+
+ cdb16 = IS_READ_CAP_16(cmd);
+ if (cdb16) {
+ alloc_len = GET_READ_CAP_16_ALLOC_LENGTH(cmd);
+ resp_size = READ_CAP_16_RESP_SIZE;
+ }
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ns = mem;
+
+ response = kmalloc(resp_size, GFP_KERNEL);
+ if (response == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+ memset(response, 0, resp_size);
+ nvme_trans_fill_read_cap(response, id_ns, cdb16);
+
+ xfer_len = min(alloc_len, resp_size);
+ res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+ kfree(response);
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out:
+ return res;
+}
+
+static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ u32 alloc_len, xfer_len, resp_size;
+ u8 select_report;
+ u8 *response;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ctrl *id_ctrl;
+ u32 ll_length, lun_id;
+ u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
+ __be32 tmp_len;
+
+ alloc_len = GET_REPORT_LUNS_ALLOC_LENGTH(cmd);
+ select_report = GET_U8_FROM_CDB(cmd, REPORT_LUNS_SR_OFFSET);
+
+ if ((select_report != ALL_LUNS_RETURNED) &&
+ (select_report != ALL_WELL_KNOWN_LUNS_RETURNED) &&
+ (select_report != RESTRICTED_LUNS_RETURNED)) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ } else {
+ /* NVMe Controller Identify */
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_id_ctrl),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ctrl = mem;
+ ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
+ resp_size = ll_length + LUN_DATA_HEADER_SIZE;
+
+ if (alloc_len < resp_size) {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_dma;
+ }
+
+ response = kmalloc(resp_size, GFP_KERNEL);
+ if (response == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+ memset(response, 0, resp_size);
+
+ /* The first LUN ID will always be 0 per the SAM spec */
+ for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
+ /*
+ * Set the LUN Id and then increment to the next LUN
+ * location in the parameter data.
+ */
+ __be64 tmp_id = cpu_to_be64(lun_id);
+ memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
+ lun_id_offset += LUN_ENTRY_SIZE;
+ }
+ tmp_len = cpu_to_be32(ll_length);
+ memcpy(response, &tmp_len, sizeof(u32));
+ }
+
+ xfer_len = min(alloc_len, resp_size);
+ res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+ kfree(response);
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
+ dma_addr);
+ out:
+ return res;
+}
+
+static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 alloc_len, xfer_len, resp_size;
+ u8 desc_format;
+ u8 *response;
+
+ alloc_len = GET_REQUEST_SENSE_ALLOC_LENGTH(cmd);
+ desc_format = GET_U8_FROM_CDB(cmd, REQUEST_SENSE_DESC_OFFSET);
+ desc_format &= REQUEST_SENSE_DESC_MASK;
+
+ resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
+ (FIXED_FMT_SENSE_DATA_SIZE));
+ response = kmalloc(resp_size, GFP_KERNEL);
+ if (response == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ memset(response, 0, resp_size);
+
+ if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) {
+ /* Descriptor Format Sense Data */
+ response[0] = DESC_FORMAT_SENSE_DATA;
+ response[1] = NO_SENSE;
+ /* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
+ response[2] = SCSI_ASC_NO_SENSE;
+ response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ /* SDAT_OVFL = 0 | Additional Sense Length = 0 */
+ } else {
+ /* Fixed Format Sense Data */
+ response[0] = FIXED_SENSE_DATA;
+ /* Byte 1 = Obsolete */
+ response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */
+ /* Bytes 3-6 - Information - set to zero */
+ response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
+ /* Bytes 8-11 - Cmd Specific Information - set to zero */
+ response[12] = SCSI_ASC_NO_SENSE;
+ response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ /* Byte 14 = Field Replaceable Unit Code = 0 */
+ /* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
+ }
+
+ xfer_len = min(alloc_len, resp_size);
+ res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+ kfree(response);
+ out:
+ return res;
+}
+
+static int nvme_trans_security_protocol(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+}
+
+static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_queue *nvmeq;
+ struct nvme_command c;
+ u8 immed, pcmod, pc, no_flush, start;
+
+ immed = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_IMMED_OFFSET);
+ pcmod = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET);
+ pc = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_OFFSET);
+ no_flush = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_NO_FLUSH_OFFSET);
+ start = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_START_OFFSET);
+
+ immed &= START_STOP_UNIT_CDB_IMMED_MASK;
+ pcmod &= START_STOP_UNIT_CDB_POWER_COND_MOD_MASK;
+ pc = (pc & START_STOP_UNIT_CDB_POWER_COND_MASK) >> NIBBLE_SHIFT;
+ no_flush &= START_STOP_UNIT_CDB_NO_FLUSH_MASK;
+ start &= START_STOP_UNIT_CDB_START_MASK;
+
+ if (immed != 0) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ } else {
+ if (no_flush == 0) {
+ /* Issue NVME FLUSH command prior to START STOP UNIT */
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_cmd_flush;
+ c.common.nsid = cpu_to_le32(ns->ns_id);
+
+ nvmeq = get_nvmeq(ns->dev);
+ put_nvmeq(nvmeq);
+ nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out;
+ }
+ }
+ /* Setup the expected power state transition */
+ res = nvme_trans_power_state(ns, hdr, pc, pcmod, start);
+ }
+
+ out:
+ return res;
+}
+
+static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_command c;
+ struct nvme_queue *nvmeq;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_cmd_flush;
+ c.common.nsid = cpu_to_le32(ns->ns_id);
+
+ nvmeq = get_nvmeq(ns->dev);
+ put_nvmeq(nvmeq);
+ nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out;
+ if (nvme_sc)
+ res = nvme_sc;
+
+ out:
+ return res;
+}
+
+static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 parm_hdr_len = 0;
+ u8 nvme_pf_code = 0;
+ u8 format_prot_info, long_list, format_data;
+
+ format_prot_info = GET_U8_FROM_CDB(cmd,
+ FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET);
+ long_list = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_LONG_LIST_OFFSET);
+ format_data = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET);
+
+ format_prot_info = (format_prot_info &
+ FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK) >>
+ FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT;
+ long_list &= FORMAT_UNIT_CDB_LONG_LIST_MASK;
+ format_data &= FORMAT_UNIT_CDB_FORMAT_DATA_MASK;
+
+ if (format_data != 0) {
+ if (format_prot_info != 0) {
+ if (long_list == 0)
+ parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
+ else
+ parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
+ }
+ } else if (format_data == 0 && format_prot_info != 0) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+
+ /* Get parm header from data-in/out buffer */
+ /*
+ * According to the translation spec, the only fields in the parameter
+ * list we are concerned with are in the header. So allocate only that.
+ */
+ if (parm_hdr_len > 0) {
+ res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
+ format_prot_info, &nvme_pf_code);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ }
+
+ /* Attempt to activate any previously downloaded firmware image */
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0);
+
+ /* Determine Block size and count and send format command */
+ res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+
+ res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);
+
+ out:
+ return res;
+}
+
+static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ struct nvme_dev *dev = ns->dev;
+
+ if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ NOT_READY, SCSI_ASC_LUN_NOT_READY,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ else
+ res = nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);
+
+ return res;
+}
+
+static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u32 buffer_offset, parm_list_length;
+ u8 buffer_id, mode;
+
+ parm_list_length =
+ GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET);
+ if (parm_list_length % BYTES_TO_DWORDS != 0) {
+ /* NVMe expects the firmware image to be a whole number of dwords */
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ buffer_id = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_ID_OFFSET);
+ if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ mode = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_MODE_OFFSET) &
+ WRITE_BUFFER_CDB_MODE_MASK;
+ buffer_offset =
+ GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET);
+
+ switch (mode) {
+ case DOWNLOAD_SAVE_ACTIVATE:
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
+ parm_list_length, buffer_offset,
+ buffer_id);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
+ parm_list_length, buffer_offset,
+ buffer_id);
+ break;
+ case DOWNLOAD_SAVE_DEFER_ACTIVATE:
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
+ parm_list_length, buffer_offset,
+ buffer_id);
+ break;
+ case ACTIVATE_DEFERRED_MICROCODE:
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
+ parm_list_length, buffer_offset,
+ buffer_id);
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+
+ out:
+ return res;
+}
+
+struct scsi_unmap_blk_desc {
+ __be64 slba;
+ __be32 nlb;
+ u32 resv;
+};
+
+struct scsi_unmap_parm_list {
+ __be16 unmap_data_len;
+ __be16 unmap_blk_desc_data_len;
+ u32 resv;
+ struct scsi_unmap_blk_desc desc[0];
+};
+
+static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct scsi_unmap_parm_list *plist;
+ struct nvme_dsm_range *range;
+ struct nvme_queue *nvmeq;
+ struct nvme_command c;
+ int i, nvme_sc, res = -ENOMEM;
+ u16 ndesc, list_len;
+ dma_addr_t dma_addr;
+
+ list_len = GET_U16_FROM_CDB(cmd, UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET);
+ if (!list_len)
+ return -EINVAL;
+
+ plist = kmalloc(list_len, GFP_KERNEL);
+ if (!plist)
+ return -ENOMEM;
+
+ res = nvme_trans_copy_from_user(hdr, plist, list_len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+
+ ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
+ if (!ndesc || ndesc > 256) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
+ &dma_addr, GFP_KERNEL);
+ if (!range) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < ndesc; i++) {
+ range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
+ range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
+ range[i].cattr = 0;
+ }
+
+ memset(&c, 0, sizeof(c));
+ c.dsm.opcode = nvme_cmd_dsm;
+ c.dsm.nsid = cpu_to_le32(ns->ns_id);
+ c.dsm.prp1 = cpu_to_le64(dma_addr);
+ c.dsm.nr = cpu_to_le32(ndesc - 1);
+ c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+ nvmeq = get_nvmeq(dev);
+ put_nvmeq(nvmeq);
+
+ nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+
+ dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
+ range, dma_addr);
+ out:
+ kfree(plist);
+ return res;
+}
+
+static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
+{
+ u8 cmd[BLK_MAX_CDB];
+ int retcode;
+ unsigned int opcode;
+
+ if (hdr->cmdp == NULL)
+ return -EMSGSIZE;
+ if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
+ return -EFAULT;
+
+ opcode = cmd[0];
+
+ switch (opcode) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ retcode = nvme_trans_io(ns, hdr, 0, cmd);
+ break;
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ retcode = nvme_trans_io(ns, hdr, 1, cmd);
+ break;
+ case INQUIRY:
+ retcode = nvme_trans_inquiry(ns, hdr, cmd);
+ break;
+ case LOG_SENSE:
+ retcode = nvme_trans_log_sense(ns, hdr, cmd);
+ break;
+ case MODE_SELECT:
+ case MODE_SELECT_10:
+ retcode = nvme_trans_mode_select(ns, hdr, cmd);
+ break;
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ retcode = nvme_trans_mode_sense(ns, hdr, cmd);
+ break;
+ case READ_CAPACITY:
+ retcode = nvme_trans_read_capacity(ns, hdr, cmd);
+ break;
+ case SERVICE_ACTION_IN:
+ if (IS_READ_CAP_16(cmd))
+ retcode = nvme_trans_read_capacity(ns, hdr, cmd);
+ else
+ goto out;
+ break;
+ case REPORT_LUNS:
+ retcode = nvme_trans_report_luns(ns, hdr, cmd);
+ break;
+ case REQUEST_SENSE:
+ retcode = nvme_trans_request_sense(ns, hdr, cmd);
+ break;
+ case SECURITY_PROTOCOL_IN:
+ case SECURITY_PROTOCOL_OUT:
+ retcode = nvme_trans_security_protocol(ns, hdr, cmd);
+ break;
+ case START_STOP:
+ retcode = nvme_trans_start_stop(ns, hdr, cmd);
+ break;
+ case SYNCHRONIZE_CACHE:
+ retcode = nvme_trans_synchronize_cache(ns, hdr, cmd);
+ break;
+ case FORMAT_UNIT:
+ retcode = nvme_trans_format_unit(ns, hdr, cmd);
+ break;
+ case TEST_UNIT_READY:
+ retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
+ break;
+ case WRITE_BUFFER:
+ retcode = nvme_trans_write_buffer(ns, hdr, cmd);
+ break;
+ case UNMAP:
+ retcode = nvme_trans_unmap(ns, hdr, cmd);
+ break;
+ default:
+ out:
+ retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+ return retcode;
+}
+
+int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
+{
+ struct sg_io_hdr hdr;
+ int retcode;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
+ return -EFAULT;
+ if (hdr.interface_id != 'S')
+ return -EINVAL;
+ if (hdr.cmd_len > BLK_MAX_CDB)
+ return -EINVAL;
+
+ retcode = nvme_scsi_translate(ns, &hdr);
+ if (retcode < 0)
+ return retcode;
+ if (retcode > 0)
+ retcode = SNTI_TRANSLATION_SUCCESS;
+ if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0)
+ return -EFAULT;
+
+ return retcode;
+}
+
+int nvme_sg_get_version_num(int __user *ip)
+{
+ return put_user(sg_version_num, ip);
+}
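
For context, a minimal user-space sketch of exercising this translation layer through the SG_IO ioctl, which is the path that ends up in nvme_sg_io() above. The device node name and the assumption that the block driver routes SG_IO to nvme_sg_io() are illustrative only; note that the handler also requires CAP_SYS_ADMIN.

/* Hedged sketch: issue a SCSI INQUIRY so it is handled by nvme_scsi_translate().
 * /dev/nvme0n1 and the SG_IO wiring are assumptions for illustration only. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96 bytes */
	unsigned char resp[96], sense[32];
	struct sg_io_hdr hdr;
	int fd, ret;

	fd = open("/dev/nvme0n1", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';			/* checked by nvme_sg_io() */
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxfer_len = sizeof(resp);
	hdr.dxferp = resp;
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	hdr.timeout = 5000;			/* milliseconds */

	ret = ioctl(fd, SG_IO, &hdr);
	if (ret < 0)
		perror("SG_IO");
	else
		printf("vendor: %.8s\n", &resp[8]);

	close(fd);
	return ret < 0;
}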
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index aeaea32..e992489 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -63,8 +63,6 @@ config INTEL_IOATDMA
depends on PCI && X86
select DMA_ENGINE
select DCA
- select ASYNC_TX_DISABLE_PQ_VAL_DMA
- select ASYNC_TX_DISABLE_XOR_VAL_DMA
help
Enable support for the Intel(R) I/OAT DMA engine present
in recent Intel Xeon chipsets.
@@ -174,15 +172,7 @@ config TEGRA20_APB_DMA
This DMA controller transfers data from memory to peripheral fifo
or vice versa. It does not support memory to memory data transfer.
-
-
-config SH_DMAE
- tristate "Renesas SuperH DMAC support"
- depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
- depends on !SH_DMA_API
- select DMA_ENGINE
- help
- Enable support for the Renesas SuperH DMA controllers.
+source "drivers/dma/sh/Kconfig"
config COH901318
bool "ST-Ericsson COH901318 DMA support"
@@ -328,6 +318,10 @@ config DMA_ENGINE
config DMA_VIRTUAL_CHANNELS
tristate
+config DMA_ACPI
+ def_bool y
+ depends on ACPI
+
config DMA_OF
def_bool y
depends on OF
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 488e3ff..a2b0df5 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -3,6 +3,7 @@ ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
+obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
obj-$(CONFIG_DMA_OF) += of-dma.o
obj-$(CONFIG_NET_DMA) += iovlock.o
@@ -18,7 +19,7 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_SH_DMAE) += sh/
+obj-$(CONFIG_SH_DMAE_BASE) += sh/
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
new file mode 100644
index 0000000..ba6fc62
--- /dev/null
+++ b/drivers/dma/acpi-dma.c
@@ -0,0 +1,279 @@
+/*
+ * ACPI helpers for DMA request / controller
+ *
+ * Based on of-dma.c
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
+
+static LIST_HEAD(acpi_dma_list);
+static DEFINE_MUTEX(acpi_dma_lock);
+
+/**
+ * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers
+ * @dev: struct device of DMA controller
+ * @acpi_dma_xlate: translation function which converts a dma specifier
+ * into a dma_chan structure
+ * @data: pointer to controller specific data to be used by
+ * the translation function
+ *
+ * Returns 0 on success or an appropriate errno value on error.
+ *
+ * Allocated memory should be freed with a corresponding
+ * acpi_dma_controller_free() call.
+ */
+int acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data)
+{
+ struct acpi_device *adev;
+ struct acpi_dma *adma;
+
+ if (!dev || !acpi_dma_xlate)
+ return -EINVAL;
+
+ /* Check if the device was enumerated by ACPI */
+ if (!ACPI_HANDLE(dev))
+ return -EINVAL;
+
+ if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
+ return -EINVAL;
+
+ adma = kzalloc(sizeof(*adma), GFP_KERNEL);
+ if (!adma)
+ return -ENOMEM;
+
+ adma->dev = dev;
+ adma->acpi_dma_xlate = acpi_dma_xlate;
+ adma->data = data;
+
+ /* Now queue acpi_dma controller structure in list */
+ mutex_lock(&acpi_dma_lock);
+ list_add_tail(&adma->dma_controllers, &acpi_dma_list);
+ mutex_unlock(&acpi_dma_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_dma_controller_register);
+
+/**
+ * acpi_dma_controller_free - Remove a DMA controller from the ACPI DMA helpers list
+ * @dev: struct device of DMA controller
+ *
+ * Memory allocated by acpi_dma_controller_register() is freed here.
+ */
+int acpi_dma_controller_free(struct device *dev)
+{
+ struct acpi_dma *adma;
+
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&acpi_dma_lock);
+
+ list_for_each_entry(adma, &acpi_dma_list, dma_controllers)
+ if (adma->dev == dev) {
+ list_del(&adma->dma_controllers);
+ mutex_unlock(&acpi_dma_lock);
+ kfree(adma);
+ return 0;
+ }
+
+ mutex_unlock(&acpi_dma_lock);
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(acpi_dma_controller_free);
+
+static void devm_acpi_dma_release(struct device *dev, void *res)
+{
+ acpi_dma_controller_free(dev);
+}
+
+/**
+ * devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register()
+ * @dev: device that is registering this DMA controller
+ * @acpi_dma_xlate: translation function
+ * @data: pointer to controller specific data
+ *
+ * Managed acpi_dma_controller_register(). DMA controllers registered by this
+ * function are automatically freed on driver detach. See
+ * acpi_dma_controller_register() for more information.
+ */
+int devm_acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data)
+{
+ void *res;
+ int ret;
+
+ res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data);
+ if (ret) {
+ devres_free(res);
+ return ret;
+ }
+ devres_add(dev, res);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
+
+/**
+ * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free()
+ * @dev: device that is unregistering this DMA controller
+ *
+ * Unregister a DMA controller registered with
+ * devm_acpi_dma_controller_register(). Normally this function will not need to
+ * be called and the resource management code will ensure that the resource is
+ * freed.
+ */
+void devm_acpi_dma_controller_free(struct device *dev)
+{
+ WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL));
+}
+EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
+
+struct acpi_dma_parser_data {
+ struct acpi_dma_spec dma_spec;
+ size_t index;
+ size_t n;
+};
+
+/**
+ * acpi_dma_parse_fixed_dma - Parse FixedDMA ACPI resources to a DMA specifier
+ * @res: struct acpi_resource to get FixedDMA resources from
+ * @data: pointer to a helper struct acpi_dma_parser_data
+ */
+static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
+{
+ struct acpi_dma_parser_data *pdata = data;
+
+ if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) {
+ struct acpi_resource_fixed_dma *dma = &res->data.fixed_dma;
+
+ if (pdata->n++ == pdata->index) {
+ pdata->dma_spec.chan_id = dma->channels;
+ pdata->dma_spec.slave_id = dma->request_lines;
+ }
+ }
+
+ /* Tell the ACPI core to skip this resource */
+ return 1;
+}
+
+/**
+ * acpi_dma_request_slave_chan_by_index - Get the DMA slave channel
+ * @dev: struct device to get DMA request from
+ * @index: index of FixedDMA descriptor for @dev
+ *
+ * Returns a pointer to the appropriate DMA channel on success or NULL on error.
+ */
+struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
+ size_t index)
+{
+ struct acpi_dma_parser_data pdata;
+ struct acpi_dma_spec *dma_spec = &pdata.dma_spec;
+ struct list_head resource_list;
+ struct acpi_device *adev;
+ struct acpi_dma *adma;
+ struct dma_chan *chan = NULL;
+
+ /* Check if the device was enumerated by ACPI */
+ if (!dev || !ACPI_HANDLE(dev))
+ return NULL;
+
+ if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
+ return NULL;
+
+ memset(&pdata, 0, sizeof(pdata));
+ pdata.index = index;
+
+ /* Initial values for the request line and channel */
+ dma_spec->chan_id = -1;
+ dma_spec->slave_id = -1;
+
+ INIT_LIST_HEAD(&resource_list);
+ acpi_dev_get_resources(adev, &resource_list,
+ acpi_dma_parse_fixed_dma, &pdata);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0)
+ return NULL;
+
+ mutex_lock(&acpi_dma_lock);
+
+ list_for_each_entry(adma, &acpi_dma_list, dma_controllers) {
+ dma_spec->dev = adma->dev;
+ chan = adma->acpi_dma_xlate(dma_spec, adma);
+ if (chan)
+ break;
+ }
+
+ mutex_unlock(&acpi_dma_lock);
+ return chan;
+}
+EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
+
+/**
+ * acpi_dma_request_slave_chan_by_name - Get the DMA slave channel
+ * @dev: struct device to get DMA request from
+ * @name: name that represents the corresponding FixedDMA descriptor for @dev
+ *
+ * In order to support both Device Tree and ACPI in a single driver, we
+ * translate the names "tx" and "rx" here based on the most common case where
+ * the first FixedDMA descriptor is TX and the second is RX.
+ *
+ * Returns a pointer to the appropriate DMA channel on success or NULL on error.
+ */
+struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
+ const char *name)
+{
+ size_t index;
+
+ if (!strcmp(name, "tx"))
+ index = 0;
+ else if (!strcmp(name, "rx"))
+ index = 1;
+ else
+ return NULL;
+
+ return acpi_dma_request_slave_chan_by_index(dev, index);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
+
+/**
+ * acpi_dma_simple_xlate - Simple ACPI DMA engine translation helper
+ * @dma_spec: pointer to ACPI DMA specifier
+ * @adma: pointer to ACPI DMA controller data
+ *
+ * A simple translation function for ACPI-based devices. Passes the
+ * &struct acpi_dma_spec to the filter function provided by the DMA controller
+ * driver. Returns a pointer to the channel if found or %NULL otherwise.
+ */
+struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
+ struct acpi_dma *adma)
+{
+ struct acpi_dma_filter_info *info = adma->data;
+
+ if (!info || !info->filter_fn)
+ return NULL;
+
+ return dma_request_channel(info->dma_cap, info->filter_fn, dma_spec);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_simple_xlate);
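
A minimal sketch of how a DMA controller driver might plug into these helpers. The foo_* driver, its filter callback and the probe context are hypothetical; only the acpi_dma_* and dmaengine calls reflect the API added above.

/* Hedged sketch: registering a controller with the new ACPI DMA helpers.
 * The driver and filter logic are hypothetical illustrations. */
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/acpi_dma.h>

static bool foo_dma_filter(struct dma_chan *chan, void *param)
{
	struct acpi_dma_spec *dma_spec = param;

	/* Match the channel encoded in the FixedDMA resource */
	return chan->chan_id == dma_spec->chan_id;
}

static int foo_dma_probe(struct platform_device *pdev)
{
	static struct acpi_dma_filter_info filter_info;
	int ret;

	dma_cap_zero(filter_info.dma_cap);
	dma_cap_set(DMA_SLAVE, filter_info.dma_cap);
	filter_info.filter_fn = foo_dma_filter;

	/* Freed automatically on driver detach through devres */
	ret = devm_acpi_dma_controller_register(&pdev->dev,
						acpi_dma_simple_xlate,
						&filter_info);
	if (ret)
		dev_warn(&pdev->dev, "could not register ACPI DMA helpers\n");

	return 0;
}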
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 88cfc61..e923cda 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include "at_hdmac_regs.h"
#include "dmaengine.h"
@@ -677,7 +678,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrlb |= ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
| ATC_FC_MEM2PER
- | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
+ | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
reg = sconfig->dst_addr;
for_each_sg(sgl, sg, sg_len, i) {
struct at_desc *desc;
@@ -716,7 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrlb |= ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
| ATC_FC_PER2MEM
- | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
+ | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
reg = sconfig->src_addr;
for_each_sg(sgl, sg, sg_len, i) {
@@ -822,8 +823,8 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
| ATC_FC_MEM2PER
- | ATC_SIF(AT_DMA_MEM_IF)
- | ATC_DIF(AT_DMA_PER_IF);
+ | ATC_SIF(atchan->mem_if)
+ | ATC_DIF(atchan->per_if);
break;
case DMA_DEV_TO_MEM:
@@ -833,8 +834,8 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
| ATC_FC_PER2MEM
- | ATC_SIF(AT_DMA_PER_IF)
- | ATC_DIF(AT_DMA_MEM_IF);
+ | ATC_SIF(atchan->per_if)
+ | ATC_DIF(atchan->mem_if);
break;
default:
@@ -1188,6 +1189,67 @@ static void atc_free_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
+#ifdef CONFIG_OF
+static bool at_dma_filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *atslave = slave;
+
+ if (atslave->dma_dev == chan->device->dev) {
+ chan->private = atslave;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *of_dma)
+{
+ struct dma_chan *chan;
+ struct at_dma_chan *atchan;
+ struct at_dma_slave *atslave;
+ dma_cap_mask_t mask;
+ unsigned int per_id;
+ struct platform_device *dmac_pdev;
+
+ if (dma_spec->args_count != 2)
+ return NULL;
+
+ dmac_pdev = of_find_device_by_node(dma_spec->np);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
+ if (!atslave)
+ return NULL;
+ /*
+ * We can fill both SRC_PER and DST_PER; one of these fields will be
+ * ignored depending on the DMA transfer direction.
+ */
+ per_id = dma_spec->args[1];
+ atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW
+ | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id)
+ | ATC_SRC_PER(per_id);
+ atslave->dma_dev = &dmac_pdev->dev;
+
+ chan = dma_request_channel(mask, at_dma_filter, atslave);
+ if (!chan)
+ return NULL;
+
+ atchan = to_at_dma_chan(chan);
+ atchan->per_if = dma_spec->args[0] & 0xff;
+ atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
+
+ return chan;
+}
+#else
+static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *of_dma)
+{
+ return NULL;
+}
+#endif
/*-- Module Management -----------------------------------------------*/
@@ -1342,6 +1404,8 @@ static int __init at_dma_probe(struct platform_device *pdev)
for (i = 0; i < plat_dat->nr_channels; i++) {
struct at_dma_chan *atchan = &atdma->chan[i];
+ atchan->mem_if = AT_DMA_MEM_IF;
+ atchan->per_if = AT_DMA_PER_IF;
atchan->chan_common.device = &atdma->dma_common;
dma_cookie_init(&atchan->chan_common);
list_add_tail(&atchan->chan_common.device_node,
@@ -1388,8 +1452,25 @@ static int __init at_dma_probe(struct platform_device *pdev)
dma_async_device_register(&atdma->dma_common);
+ /*
+ * Do not return an error if the dmac node is not present in order
+ * not to break the existing way of requesting a channel with
+ * dma_request_channel().
+ */
+ if (pdev->dev.of_node) {
+ err = of_dma_controller_register(pdev->dev.of_node,
+ at_dma_xlate, atdma);
+ if (err) {
+ dev_err(&pdev->dev, "could not register of_dma_controller\n");
+ goto err_of_dma_controller_register;
+ }
+ }
+
return 0;
+err_of_dma_controller_register:
+ dma_async_device_unregister(&atdma->dma_common);
+ dma_pool_destroy(atdma->dma_desc_pool);
err_pool_create:
platform_set_drvdata(pdev, NULL);
free_irq(platform_get_irq(pdev, 0), atdma);
@@ -1406,7 +1487,7 @@ err_kfree:
return err;
}
-static int __exit at_dma_remove(struct platform_device *pdev)
+static int at_dma_remove(struct platform_device *pdev)
{
struct at_dma *atdma = platform_get_drvdata(pdev);
struct dma_chan *chan, *_chan;
@@ -1564,7 +1645,7 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = {
};
static struct platform_driver at_dma_driver = {
- .remove = __exit_p(at_dma_remove),
+ .remove = at_dma_remove,
.shutdown = at_dma_shutdown,
.id_table = atdma_devtypes,
.driver = {
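
For reference, a hedged sketch of what a client does with a channel obtained through at_dma_xlate(); the FIFO address, bus width and flags are placeholders, and the per_if/mem_if interface selection added above is handled transparently inside the driver's prep functions.

/* Hedged sketch: generic dmaengine slave usage of a channel returned by the
 * xlate callback.  All peripheral-specific values are illustrative. */
#include <linux/dmaengine.h>

static int foo_start_tx(struct dma_chan *chan, dma_addr_t buf, size_t len,
			dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 1,
	};
	struct dma_async_tx_descriptor *desc;

	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}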
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 0eb3c13..c604d26 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -220,6 +220,8 @@ enum atc_status {
* @device: parent device
* @ch_regs: memory mapped register base
* @mask: channel index in a mask
+ * @per_if: peripheral interface
+ * @mem_if: memory interface
* @status: transmit status information from irq/prep* functions
* to tasklet (use atomic operations)
* @tasklet: bottom half to finish transaction work
@@ -238,6 +240,8 @@ struct at_dma_chan {
struct at_dma *device;
void __iomem *ch_regs;
u8 mask;
+ u8 per_if;
+ u8 mem_if;
unsigned long status;
struct tasklet_struct tasklet;
u32 save_cfg;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 797940e..3b23061 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2748,7 +2748,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
return err;
}
-static int __exit coh901318_remove(struct platform_device *pdev)
+static int coh901318_remove(struct platform_device *pdev)
{
struct coh901318_base *base = platform_get_drvdata(pdev);
@@ -2760,7 +2760,7 @@ static int __exit coh901318_remove(struct platform_device *pdev)
static struct platform_driver coh901318_driver = {
- .remove = __exit_p(coh901318_remove),
+ .remove = coh901318_remove,
.driver = {
.name = "coh901318",
},
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b2728d6..93f7992 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -62,6 +62,8 @@
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
static DEFINE_MUTEX(dma_list_mutex);
@@ -174,7 +176,8 @@ static struct class dma_devclass = {
#define dma_device_satisfies_mask(device, mask) \
__dma_device_satisfies_mask((device), &(mask))
static int
-__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
+__dma_device_satisfies_mask(struct dma_device *device,
+ const dma_cap_mask_t *want)
{
dma_cap_mask_t has;
@@ -463,7 +466,8 @@ static void dma_channel_rebalance(void)
}
}
-static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
+static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
+ struct dma_device *dev,
dma_filter_fn fn, void *fn_param)
{
struct dma_chan *chan;
@@ -505,7 +509,8 @@ static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_devic
* @fn: optional callback to disposition available channels
* @fn_param: opaque parameter to pass to dma_filter_fn
*/
-struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
+struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param)
{
struct dma_device *device, *_d;
struct dma_chan *chan = NULL;
@@ -555,12 +560,16 @@ EXPORT_SYMBOL_GPL(__dma_request_channel);
* @dev: pointer to client device structure
* @name: slave channel name
*/
-struct dma_chan *dma_request_slave_channel(struct device *dev, char *name)
+struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
{
/* If device-tree is present get slave info from here */
if (dev->of_node)
return of_dma_request_slave_channel(dev->of_node, name);
+ /* If device was enumerated by ACPI get slave info from here */
+ if (ACPI_HANDLE(dev))
+ return acpi_dma_request_slave_chan_by_name(dev, name);
+
return NULL;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
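
From a slave driver's point of view the lookup is the same whether the platform uses DT or ACPI; a minimal sketch with a hypothetical consumer and the common "tx" channel name:

/* Hedged sketch: a consumer requesting its TX channel by name.  The probe
 * helper and error handling are illustrative; dma_request_slave_channel()
 * resolves the name via OF or, after this patch, via ACPI FixedDMA. */
#include <linux/device.h>
#include <linux/dmaengine.h>

static int foo_spi_request_dma(struct device *dev)
{
	struct dma_chan *tx = dma_request_slave_channel(dev, "tx");

	if (!tx)
		return -ENODEV;	/* fall back to PIO, for example */

	/* ... configure with dmaengine_slave_config() and prep transfers ... */
	dma_release_channel(tx);
	return 0;
}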
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a2c8904..d8ce4ec 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -2,6 +2,7 @@
* DMA Engine test module
*
* Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2013 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,6 +19,10 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
@@ -61,6 +66,9 @@ module_param(timeout, uint, S_IRUGO);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
+/* Maximum number of mismatched bytes in the buffer to print */
+#define MAX_ERROR_COUNT 32
+
/*
* Initialization patterns. All bytes in the source buffer has bit 7
* set, all bytes in the destination buffer has bit 7 cleared.
@@ -78,13 +86,65 @@ MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
#define PATTERN_OVERWRITE 0x20
#define PATTERN_COUNT_MASK 0x1f
+enum dmatest_error_type {
+ DMATEST_ET_OK,
+ DMATEST_ET_MAP_SRC,
+ DMATEST_ET_MAP_DST,
+ DMATEST_ET_PREP,
+ DMATEST_ET_SUBMIT,
+ DMATEST_ET_TIMEOUT,
+ DMATEST_ET_DMA_ERROR,
+ DMATEST_ET_DMA_IN_PROGRESS,
+ DMATEST_ET_VERIFY,
+ DMATEST_ET_VERIFY_BUF,
+};
+
+struct dmatest_verify_buffer {
+ unsigned int index;
+ u8 expected;
+ u8 actual;
+};
+
+struct dmatest_verify_result {
+ unsigned int error_count;
+ struct dmatest_verify_buffer data[MAX_ERROR_COUNT];
+ u8 pattern;
+ bool is_srcbuf;
+};
+
+struct dmatest_thread_result {
+ struct list_head node;
+ unsigned int n;
+ unsigned int src_off;
+ unsigned int dst_off;
+ unsigned int len;
+ enum dmatest_error_type type;
+ union {
+ unsigned long data;
+ dma_cookie_t cookie;
+ enum dma_status status;
+ int error;
+ struct dmatest_verify_result *vr;
+ };
+};
+
+struct dmatest_result {
+ struct list_head node;
+ char *name;
+ struct list_head results;
+};
+
+struct dmatest_info;
+
struct dmatest_thread {
struct list_head node;
+ struct dmatest_info *info;
struct task_struct *task;
struct dma_chan *chan;
u8 **srcs;
u8 **dsts;
enum dma_transaction_type type;
+ bool done;
};
struct dmatest_chan {
@@ -93,25 +153,69 @@ struct dmatest_chan {
struct list_head threads;
};
-/*
- * These are protected by dma_list_mutex since they're only used by
- * the DMA filter function callback
+/**
+ * struct dmatest_params - test parameters.
+ * @buf_size: size of the memcpy test buffer
+ * @channel: bus ID of the channel to test
+ * @device: bus ID of the DMA Engine to test
+ * @threads_per_chan: number of threads to start per channel
+ * @max_channels: maximum number of channels to use
+ * @iterations: iterations before stopping test
+ * @xor_sources: number of xor source buffers
+ * @pq_sources: number of p+q source buffers
+ * @timeout: transfer timeout in msec, -1 for infinite timeout
*/
-static LIST_HEAD(dmatest_channels);
-static unsigned int nr_channels;
+struct dmatest_params {
+ unsigned int buf_size;
+ char channel[20];
+ char device[20];
+ unsigned int threads_per_chan;
+ unsigned int max_channels;
+ unsigned int iterations;
+ unsigned int xor_sources;
+ unsigned int pq_sources;
+ int timeout;
+};
-static bool dmatest_match_channel(struct dma_chan *chan)
+/**
+ * struct dmatest_info - test information.
+ * @params: test parameters
+ * @lock: access protection to the fields of this structure
+ */
+struct dmatest_info {
+ /* Test parameters */
+ struct dmatest_params params;
+
+ /* Internal state */
+ struct list_head channels;
+ unsigned int nr_channels;
+ struct mutex lock;
+
+ /* debugfs related stuff */
+ struct dentry *root;
+ struct dmatest_params dbgfs_params;
+
+ /* Test results */
+ struct list_head results;
+ struct mutex results_lock;
+};
+
+static struct dmatest_info test_info;
+
+static bool dmatest_match_channel(struct dmatest_params *params,
+ struct dma_chan *chan)
{
- if (test_channel[0] == '\0')
+ if (params->channel[0] == '\0')
return true;
- return strcmp(dma_chan_name(chan), test_channel) == 0;
+ return strcmp(dma_chan_name(chan), params->channel) == 0;
}
-static bool dmatest_match_device(struct dma_device *device)
+static bool dmatest_match_device(struct dmatest_params *params,
+ struct dma_device *device)
{
- if (test_device[0] == '\0')
+ if (params->device[0] == '\0')
return true;
- return strcmp(dev_name(device->dev), test_device) == 0;
+ return strcmp(dev_name(device->dev), params->device) == 0;
}
static unsigned long dmatest_random(void)
@@ -122,7 +226,8 @@ static unsigned long dmatest_random(void)
return buf;
}
-static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
+static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
+ unsigned int buf_size)
{
unsigned int i;
u8 *buf;
@@ -133,13 +238,14 @@ static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
for ( ; i < start + len; i++)
buf[i] = PATTERN_SRC | PATTERN_COPY
| (~i & PATTERN_COUNT_MASK);
- for ( ; i < test_buf_size; i++)
+ for ( ; i < buf_size; i++)
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
buf++;
}
}
-static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
+static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
+ unsigned int buf_size)
{
unsigned int i;
u8 *buf;
@@ -150,40 +256,14 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
for ( ; i < start + len; i++)
buf[i] = PATTERN_DST | PATTERN_OVERWRITE
| (~i & PATTERN_COUNT_MASK);
- for ( ; i < test_buf_size; i++)
+ for ( ; i < buf_size; i++)
buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
}
}
-static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
- unsigned int counter, bool is_srcbuf)
-{
- u8 diff = actual ^ pattern;
- u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
- const char *thread_name = current->comm;
-
- if (is_srcbuf)
- pr_warning("%s: srcbuf[0x%x] overwritten!"
- " Expected %02x, got %02x\n",
- thread_name, index, expected, actual);
- else if ((pattern & PATTERN_COPY)
- && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
- pr_warning("%s: dstbuf[0x%x] not copied!"
- " Expected %02x, got %02x\n",
- thread_name, index, expected, actual);
- else if (diff & PATTERN_SRC)
- pr_warning("%s: dstbuf[0x%x] was copied!"
- " Expected %02x, got %02x\n",
- thread_name, index, expected, actual);
- else
- pr_warning("%s: dstbuf[0x%x] mismatch!"
- " Expected %02x, got %02x\n",
- thread_name, index, expected, actual);
-}
-
-static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
- unsigned int end, unsigned int counter, u8 pattern,
- bool is_srcbuf)
+static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
+ unsigned int start, unsigned int end, unsigned int counter,
+ u8 pattern, bool is_srcbuf)
{
unsigned int i;
unsigned int error_count = 0;
@@ -191,6 +271,7 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
u8 expected;
u8 *buf;
unsigned int counter_orig = counter;
+ struct dmatest_verify_buffer *vb;
for (; (buf = *bufs); bufs++) {
counter = counter_orig;
@@ -198,18 +279,21 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
actual = buf[i];
expected = pattern | (~counter & PATTERN_COUNT_MASK);
if (actual != expected) {
- if (error_count < 32)
- dmatest_mismatch(actual, pattern, i,
- counter, is_srcbuf);
+ if (error_count < MAX_ERROR_COUNT && vr) {
+ vb = &vr->data[error_count];
+ vb->index = i;
+ vb->expected = expected;
+ vb->actual = actual;
+ }
error_count++;
}
counter++;
}
}
- if (error_count > 32)
+ if (error_count > MAX_ERROR_COUNT)
pr_warning("%s: %u errors suppressed\n",
- current->comm, error_count - 32);
+ current->comm, error_count - MAX_ERROR_COUNT);
return error_count;
}
@@ -249,6 +333,170 @@ static unsigned int min_odd(unsigned int x, unsigned int y)
return val % 2 ? val : val - 1;
}
+static char *verify_result_get_one(struct dmatest_verify_result *vr,
+ unsigned int i)
+{
+ struct dmatest_verify_buffer *vb = &vr->data[i];
+ u8 diff = vb->actual ^ vr->pattern;
+ static char buf[512];
+ char *msg;
+
+ if (vr->is_srcbuf)
+ msg = "srcbuf overwritten!";
+ else if ((vr->pattern & PATTERN_COPY)
+ && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
+ msg = "dstbuf not copied!";
+ else if (diff & PATTERN_SRC)
+ msg = "dstbuf was copied!";
+ else
+ msg = "dstbuf mismatch!";
+
+ snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg,
+ vb->index, vb->expected, vb->actual);
+
+ return buf;
+}
+
+static char *thread_result_get(const char *name,
+ struct dmatest_thread_result *tr)
+{
+ static const char * const messages[] = {
+ [DMATEST_ET_OK] = "No errors",
+ [DMATEST_ET_MAP_SRC] = "src mapping error",
+ [DMATEST_ET_MAP_DST] = "dst mapping error",
+ [DMATEST_ET_PREP] = "prep error",
+ [DMATEST_ET_SUBMIT] = "submit error",
+ [DMATEST_ET_TIMEOUT] = "test timed out",
+ [DMATEST_ET_DMA_ERROR] =
+ "got completion callback (DMA_ERROR)",
+ [DMATEST_ET_DMA_IN_PROGRESS] =
+ "got completion callback (DMA_IN_PROGRESS)",
+ [DMATEST_ET_VERIFY] = "errors",
+ [DMATEST_ET_VERIFY_BUF] = "verify errors",
+ };
+ static char buf[512];
+
+ snprintf(buf, sizeof(buf) - 1,
+ "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)",
+ name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
+ tr->len, tr->data);
+
+ return buf;
+}
+
+static int thread_result_add(struct dmatest_info *info,
+ struct dmatest_result *r, enum dmatest_error_type type,
+ unsigned int n, unsigned int src_off, unsigned int dst_off,
+ unsigned int len, unsigned long data)
+{
+ struct dmatest_thread_result *tr;
+
+ tr = kzalloc(sizeof(*tr), GFP_KERNEL);
+ if (!tr)
+ return -ENOMEM;
+
+ tr->type = type;
+ tr->n = n;
+ tr->src_off = src_off;
+ tr->dst_off = dst_off;
+ tr->len = len;
+ tr->data = data;
+
+ mutex_lock(&info->results_lock);
+ list_add_tail(&tr->node, &r->results);
+ mutex_unlock(&info->results_lock);
+
+ pr_warn("%s\n", thread_result_get(r->name, tr));
+ return 0;
+}
+
+static unsigned int verify_result_add(struct dmatest_info *info,
+ struct dmatest_result *r, unsigned int n,
+ unsigned int src_off, unsigned int dst_off, unsigned int len,
+ u8 **bufs, int whence, unsigned int counter, u8 pattern,
+ bool is_srcbuf)
+{
+ struct dmatest_verify_result *vr;
+ unsigned int error_count;
+ unsigned int buf_off = is_srcbuf ? src_off : dst_off;
+ unsigned int start, end;
+
+ if (whence < 0) {
+ start = 0;
+ end = buf_off;
+ } else if (whence > 0) {
+ start = buf_off + len;
+ end = info->params.buf_size;
+ } else {
+ start = buf_off;
+ end = buf_off + len;
+ }
+
+ vr = kmalloc(sizeof(*vr), GFP_KERNEL);
+ if (!vr) {
+ pr_warn("dmatest: No memory to store verify result\n");
+ return dmatest_verify(NULL, bufs, start, end, counter, pattern,
+ is_srcbuf);
+ }
+
+ vr->pattern = pattern;
+ vr->is_srcbuf = is_srcbuf;
+
+ error_count = dmatest_verify(vr, bufs, start, end, counter, pattern,
+ is_srcbuf);
+ if (error_count) {
+ vr->error_count = error_count;
+ thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off,
+ dst_off, len, (unsigned long)vr);
+ return error_count;
+ }
+
+ kfree(vr);
+ return 0;
+}
+
+static void result_free(struct dmatest_info *info, const char *name)
+{
+ struct dmatest_result *r, *_r;
+
+ mutex_lock(&info->results_lock);
+ list_for_each_entry_safe(r, _r, &info->results, node) {
+ struct dmatest_thread_result *tr, *_tr;
+
+ if (name && strcmp(r->name, name))
+ continue;
+
+ list_for_each_entry_safe(tr, _tr, &r->results, node) {
+ if (tr->type == DMATEST_ET_VERIFY_BUF)
+ kfree(tr->vr);
+ list_del(&tr->node);
+ kfree(tr);
+ }
+
+ kfree(r->name);
+ list_del(&r->node);
+ kfree(r);
+ }
+
+ mutex_unlock(&info->results_lock);
+}
+
+static struct dmatest_result *result_init(struct dmatest_info *info,
+ const char *name)
+{
+ struct dmatest_result *r;
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (r) {
+ r->name = kstrdup(name, GFP_KERNEL);
+ INIT_LIST_HEAD(&r->results);
+ mutex_lock(&info->results_lock);
+ list_add_tail(&r->node, &info->results);
+ mutex_unlock(&info->results_lock);
+ }
+ return r;
+}
+
/*
* This function repeatedly tests DMA transfers of various lengths and
* offsets for a given operation type until it is told to exit by
@@ -268,6 +516,8 @@ static int dmatest_func(void *data)
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
struct dmatest_thread *thread = data;
struct dmatest_done done = { .wait = &done_wait };
+ struct dmatest_info *info;
+ struct dmatest_params *params;
struct dma_chan *chan;
struct dma_device *dev;
const char *thread_name;
@@ -278,11 +528,12 @@ static int dmatest_func(void *data)
dma_cookie_t cookie;
enum dma_status status;
enum dma_ctrl_flags flags;
- u8 pq_coefs[pq_sources + 1];
+ u8 *pq_coefs = NULL;
int ret;
int src_cnt;
int dst_cnt;
int i;
+ struct dmatest_result *result;
thread_name = current->comm;
set_freezable();
@@ -290,28 +541,39 @@ static int dmatest_func(void *data)
ret = -ENOMEM;
smp_rmb();
+ info = thread->info;
+ params = &info->params;
chan = thread->chan;
dev = chan->device;
if (thread->type == DMA_MEMCPY)
src_cnt = dst_cnt = 1;
else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
- src_cnt = min_odd(xor_sources | 1, dev->max_xor);
+ src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
dst_cnt = 1;
} else if (thread->type == DMA_PQ) {
/* force odd to ensure dst = src */
- src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0));
+ src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
dst_cnt = 2;
+
+ pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
+ if (!pq_coefs)
+ goto err_thread_type;
+
for (i = 0; i < src_cnt; i++)
pq_coefs[i] = 1;
} else
+ goto err_thread_type;
+
+ result = result_init(info, thread_name);
+ if (!result)
goto err_srcs;
thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
if (!thread->srcs)
goto err_srcs;
for (i = 0; i < src_cnt; i++) {
- thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
if (!thread->srcs[i])
goto err_srcbuf;
}
@@ -321,7 +583,7 @@ static int dmatest_func(void *data)
if (!thread->dsts)
goto err_dsts;
for (i = 0; i < dst_cnt; i++) {
- thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
if (!thread->dsts[i])
goto err_dstbuf;
}
@@ -337,7 +599,7 @@ static int dmatest_func(void *data)
| DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
while (!kthread_should_stop()
- && !(iterations && total_tests >= iterations)) {
+ && !(params->iterations && total_tests >= params->iterations)) {
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
@@ -353,24 +615,24 @@ static int dmatest_func(void *data)
else if (thread->type == DMA_PQ)
align = dev->pq_align;
- if (1 << align > test_buf_size) {
+ if (1 << align > params->buf_size) {
pr_err("%u-byte buffer too small for %d-byte alignment\n",
- test_buf_size, 1 << align);
+ params->buf_size, 1 << align);
break;
}
- len = dmatest_random() % test_buf_size + 1;
+ len = dmatest_random() % params->buf_size + 1;
len = (len >> align) << align;
if (!len)
len = 1 << align;
- src_off = dmatest_random() % (test_buf_size - len + 1);
- dst_off = dmatest_random() % (test_buf_size - len + 1);
+ src_off = dmatest_random() % (params->buf_size - len + 1);
+ dst_off = dmatest_random() % (params->buf_size - len + 1);
src_off = (src_off >> align) << align;
dst_off = (dst_off >> align) << align;
- dmatest_init_srcs(thread->srcs, src_off, len);
- dmatest_init_dsts(thread->dsts, dst_off, len);
+ dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
+ dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
for (i = 0; i < src_cnt; i++) {
u8 *buf = thread->srcs[i] + src_off;
@@ -380,10 +642,10 @@ static int dmatest_func(void *data)
ret = dma_mapping_error(dev->dev, dma_srcs[i]);
if (ret) {
unmap_src(dev->dev, dma_srcs, len, i);
- pr_warn("%s: #%u: mapping error %d with "
- "src_off=0x%x len=0x%x\n",
- thread_name, total_tests - 1, ret,
- src_off, len);
+ thread_result_add(info, result,
+ DMATEST_ET_MAP_SRC,
+ total_tests, src_off, dst_off,
+ len, ret);
failed_tests++;
continue;
}
@@ -391,16 +653,17 @@ static int dmatest_func(void *data)
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
for (i = 0; i < dst_cnt; i++) {
dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
- test_buf_size,
+ params->buf_size,
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(dev->dev, dma_dsts[i]);
if (ret) {
unmap_src(dev->dev, dma_srcs, len, src_cnt);
- unmap_dst(dev->dev, dma_dsts, test_buf_size, i);
- pr_warn("%s: #%u: mapping error %d with "
- "dst_off=0x%x len=0x%x\n",
- thread_name, total_tests - 1, ret,
- dst_off, test_buf_size);
+ unmap_dst(dev->dev, dma_dsts, params->buf_size,
+ i);
+ thread_result_add(info, result,
+ DMATEST_ET_MAP_DST,
+ total_tests, src_off, dst_off,
+ len, ret);
failed_tests++;
continue;
}
@@ -428,11 +691,11 @@ static int dmatest_func(void *data)
if (!tx) {
unmap_src(dev->dev, dma_srcs, len, src_cnt);
- unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
- pr_warning("%s: #%u: prep error with src_off=0x%x "
- "dst_off=0x%x len=0x%x\n",
- thread_name, total_tests - 1,
- src_off, dst_off, len);
+ unmap_dst(dev->dev, dma_dsts, params->buf_size,
+ dst_cnt);
+ thread_result_add(info, result, DMATEST_ET_PREP,
+ total_tests, src_off, dst_off,
+ len, 0);
msleep(100);
failed_tests++;
continue;
@@ -444,18 +707,18 @@ static int dmatest_func(void *data)
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
- pr_warning("%s: #%u: submit error %d with src_off=0x%x "
- "dst_off=0x%x len=0x%x\n",
- thread_name, total_tests - 1, cookie,
- src_off, dst_off, len);
+ thread_result_add(info, result, DMATEST_ET_SUBMIT,
+ total_tests, src_off, dst_off,
+ len, cookie);
msleep(100);
failed_tests++;
continue;
}
dma_async_issue_pending(chan);
- wait_event_freezable_timeout(done_wait, done.done,
- msecs_to_jiffies(timeout));
+ wait_event_freezable_timeout(done_wait,
+ done.done || kthread_should_stop(),
+ msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
@@ -468,56 +731,57 @@ static int dmatest_func(void *data)
* free it this time?" dancing. For now, just
* leave it dangling.
*/
- pr_warning("%s: #%u: test timed out\n",
- thread_name, total_tests - 1);
+ thread_result_add(info, result, DMATEST_ET_TIMEOUT,
+ total_tests, src_off, dst_off,
+ len, 0);
failed_tests++;
continue;
} else if (status != DMA_SUCCESS) {
- pr_warning("%s: #%u: got completion callback,"
- " but status is \'%s\'\n",
- thread_name, total_tests - 1,
- status == DMA_ERROR ? "error" : "in progress");
+ enum dmatest_error_type type = (status == DMA_ERROR) ?
+ DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
+ thread_result_add(info, result, type,
+ total_tests, src_off, dst_off,
+ len, status);
failed_tests++;
continue;
}
/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
- unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
+ unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
error_count = 0;
pr_debug("%s: verifying source buffer...\n", thread_name);
- error_count += dmatest_verify(thread->srcs, 0, src_off,
+ error_count += verify_result_add(info, result, total_tests,
+ src_off, dst_off, len, thread->srcs, -1,
0, PATTERN_SRC, true);
- error_count += dmatest_verify(thread->srcs, src_off,
- src_off + len, src_off,
- PATTERN_SRC | PATTERN_COPY, true);
- error_count += dmatest_verify(thread->srcs, src_off + len,
- test_buf_size, src_off + len,
- PATTERN_SRC, true);
-
- pr_debug("%s: verifying dest buffer...\n",
- thread->task->comm);
- error_count += dmatest_verify(thread->dsts, 0, dst_off,
+ error_count += verify_result_add(info, result, total_tests,
+ src_off, dst_off, len, thread->srcs, 0,
+ src_off, PATTERN_SRC | PATTERN_COPY, true);
+ error_count += verify_result_add(info, result, total_tests,
+ src_off, dst_off, len, thread->srcs, 1,
+ src_off + len, PATTERN_SRC, true);
+
+ pr_debug("%s: verifying dest buffer...\n", thread_name);
+ error_count += verify_result_add(info, result, total_tests,
+ src_off, dst_off, len, thread->dsts, -1,
0, PATTERN_DST, false);
- error_count += dmatest_verify(thread->dsts, dst_off,
- dst_off + len, src_off,
- PATTERN_SRC | PATTERN_COPY, false);
- error_count += dmatest_verify(thread->dsts, dst_off + len,
- test_buf_size, dst_off + len,
- PATTERN_DST, false);
+ error_count += verify_result_add(info, result, total_tests,
+ src_off, dst_off, len, thread->dsts, 0,
+ src_off, PATTERN_SRC | PATTERN_COPY, false);
+ error_count += verify_result_add(info, result, total_tests,
+ src_off, dst_off, len, thread->dsts, 1,
+ dst_off + len, PATTERN_DST, false);
if (error_count) {
- pr_warning("%s: #%u: %u errors with "
- "src_off=0x%x dst_off=0x%x len=0x%x\n",
- thread_name, total_tests - 1, error_count,
- src_off, dst_off, len);
+ thread_result_add(info, result, DMATEST_ET_VERIFY,
+ total_tests, src_off, dst_off,
+ len, error_count);
failed_tests++;
} else {
- pr_debug("%s: #%u: No errors with "
- "src_off=0x%x dst_off=0x%x len=0x%x\n",
- thread_name, total_tests - 1,
- src_off, dst_off, len);
+ thread_result_add(info, result, DMATEST_ET_OK,
+ total_tests, src_off, dst_off,
+ len, 0);
}
}
@@ -532,6 +796,8 @@ err_dsts:
err_srcbuf:
kfree(thread->srcs);
err_srcs:
+ kfree(pq_coefs);
+err_thread_type:
pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
thread_name, total_tests, failed_tests, ret);
@@ -539,7 +805,9 @@ err_srcs:
if (ret)
dmaengine_terminate_all(chan);
- if (iterations > 0)
+ thread->done = true;
+
+ if (params->iterations > 0)
while (!kthread_should_stop()) {
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
interruptible_sleep_on(&wait_dmatest_exit);
@@ -568,8 +836,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
kfree(dtc);
}
-static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
+static int dmatest_add_threads(struct dmatest_info *info,
+ struct dmatest_chan *dtc, enum dma_transaction_type type)
{
+ struct dmatest_params *params = &info->params;
struct dmatest_thread *thread;
struct dma_chan *chan = dtc->chan;
char *op;
@@ -584,7 +854,7 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty
else
return -EINVAL;
- for (i = 0; i < threads_per_chan; i++) {
+ for (i = 0; i < params->threads_per_chan; i++) {
thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
if (!thread) {
pr_warning("dmatest: No memory for %s-%s%u\n",
@@ -592,6 +862,7 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty
break;
}
+ thread->info = info;
thread->chan = dtc->chan;
thread->type = type;
smp_wmb();
@@ -612,7 +883,8 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty
return i;
}
-static int dmatest_add_channel(struct dma_chan *chan)
+static int dmatest_add_channel(struct dmatest_info *info,
+ struct dma_chan *chan)
{
struct dmatest_chan *dtc;
struct dma_device *dma_dev = chan->device;
@@ -629,75 +901,418 @@ static int dmatest_add_channel(struct dma_chan *chan)
INIT_LIST_HEAD(&dtc->threads);
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
- cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
+ cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
thread_count += cnt > 0 ? cnt : 0;
}
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
- cnt = dmatest_add_threads(dtc, DMA_XOR);
+ cnt = dmatest_add_threads(info, dtc, DMA_XOR);
thread_count += cnt > 0 ? cnt : 0;
}
if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
- cnt = dmatest_add_threads(dtc, DMA_PQ);
+ cnt = dmatest_add_threads(info, dtc, DMA_PQ);
thread_count += cnt > 0 ? cnt : 0;
}
pr_info("dmatest: Started %u threads using %s\n",
thread_count, dma_chan_name(chan));
- list_add_tail(&dtc->node, &dmatest_channels);
- nr_channels++;
+ list_add_tail(&dtc->node, &info->channels);
+ info->nr_channels++;
return 0;
}
static bool filter(struct dma_chan *chan, void *param)
{
- if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
+ struct dmatest_params *params = param;
+
+ if (!dmatest_match_channel(params, chan) ||
+ !dmatest_match_device(params, chan->device))
return false;
else
return true;
}
-static int __init dmatest_init(void)
+static int __run_threaded_test(struct dmatest_info *info)
{
dma_cap_mask_t mask;
struct dma_chan *chan;
+ struct dmatest_params *params = &info->params;
int err = 0;
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
for (;;) {
- chan = dma_request_channel(mask, filter, NULL);
+ chan = dma_request_channel(mask, filter, params);
if (chan) {
- err = dmatest_add_channel(chan);
+ err = dmatest_add_channel(info, chan);
if (err) {
dma_release_channel(chan);
break; /* add_channel failed, punt */
}
} else
break; /* no more channels available */
- if (max_channels && nr_channels >= max_channels)
+ if (params->max_channels &&
+ info->nr_channels >= params->max_channels)
break; /* we have all we need */
}
-
return err;
}
-/* when compiled-in wait for drivers to load first */
-late_initcall(dmatest_init);
-static void __exit dmatest_exit(void)
+#ifndef MODULE
+static int run_threaded_test(struct dmatest_info *info)
+{
+ int ret;
+
+ mutex_lock(&info->lock);
+ ret = __run_threaded_test(info);
+ mutex_unlock(&info->lock);
+ return ret;
+}
+#endif
+
+static void __stop_threaded_test(struct dmatest_info *info)
{
struct dmatest_chan *dtc, *_dtc;
struct dma_chan *chan;
- list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
+ list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
list_del(&dtc->node);
chan = dtc->chan;
dmatest_cleanup_channel(dtc);
- pr_debug("dmatest: dropped channel %s\n",
- dma_chan_name(chan));
+ pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
dma_release_channel(chan);
}
+
+ info->nr_channels = 0;
+}
+
+static void stop_threaded_test(struct dmatest_info *info)
+{
+ mutex_lock(&info->lock);
+ __stop_threaded_test(info);
+ mutex_unlock(&info->lock);
+}
+
+static int __restart_threaded_test(struct dmatest_info *info, bool run)
+{
+ struct dmatest_params *params = &info->params;
+ int ret;
+
+ /* Stop any running test first */
+ __stop_threaded_test(info);
+
+ if (run == false)
+ return 0;
+
+ /* Clear results from previous run */
+ result_free(info, NULL);
+
+ /* Copy test parameters */
+ memcpy(params, &info->dbgfs_params, sizeof(*params));
+
+ /* Run test with new parameters */
+ ret = __run_threaded_test(info);
+ if (ret) {
+ __stop_threaded_test(info);
+ pr_err("dmatest: Can't run test\n");
+ }
+
+ return ret;
+}
+
+static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
+ const void __user *from, size_t count)
+{
+ char tmp[20];
+ ssize_t len;
+
+ len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
+ if (len >= 0) {
+ tmp[len] = '\0';
+ strlcpy(to, strim(tmp), available);
+ }
+
+ return len;
+}
+
+static ssize_t dtf_read_channel(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dmatest_info *info = file->private_data;
+ return simple_read_from_buffer(buf, count, ppos,
+ info->dbgfs_params.channel,
+ strlen(info->dbgfs_params.channel));
+}
+
+static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dmatest_info *info = file->private_data;
+ return dtf_write_string(info->dbgfs_params.channel,
+ sizeof(info->dbgfs_params.channel),
+ ppos, buf, size);
+}
+
+static const struct file_operations dtf_channel_fops = {
+ .read = dtf_read_channel,
+ .write = dtf_write_channel,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t dtf_read_device(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dmatest_info *info = file->private_data;
+ return simple_read_from_buffer(buf, count, ppos,
+ info->dbgfs_params.device,
+ strlen(info->dbgfs_params.device));
+}
+
+static ssize_t dtf_write_device(struct file *file, const char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dmatest_info *info = file->private_data;
+ return dtf_write_string(info->dbgfs_params.device,
+ sizeof(info->dbgfs_params.device),
+ ppos, buf, size);
+}
+
+static const struct file_operations dtf_device_fops = {
+ .read = dtf_read_device,
+ .write = dtf_write_device,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dmatest_info *info = file->private_data;
+ char buf[3];
+ struct dmatest_chan *dtc;
+ bool alive = false;
+
+ mutex_lock(&info->lock);
+ list_for_each_entry(dtc, &info->channels, node) {
+ struct dmatest_thread *thread;
+
+ list_for_each_entry(thread, &dtc->threads, node) {
+ if (!thread->done) {
+ alive = true;
+ break;
+ }
+ }
+ }
+
+ if (alive) {
+ buf[0] = 'Y';
+ } else {
+ __stop_threaded_test(info);
+ buf[0] = 'N';
+ }
+
+ mutex_unlock(&info->lock);
+ buf[1] = '\n';
+ buf[2] = 0x00;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dmatest_info *info = file->private_data;
+ char buf[16];
+ bool bv;
+ int ret = 0;
+
+ if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
+ return -EFAULT;
+
+ if (strtobool(buf, &bv) == 0) {
+ mutex_lock(&info->lock);
+ ret = __restart_threaded_test(info, bv);
+ mutex_unlock(&info->lock);
+ }
+
+ return ret ? ret : count;
+}
+
+static const struct file_operations dtf_run_fops = {
+ .read = dtf_read_run,
+ .write = dtf_write_run,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static int dtf_results_show(struct seq_file *sf, void *data)
+{
+ struct dmatest_info *info = sf->private;
+ struct dmatest_result *result;
+ struct dmatest_thread_result *tr;
+ unsigned int i;
+
+ mutex_lock(&info->results_lock);
+ list_for_each_entry(result, &info->results, node) {
+ list_for_each_entry(tr, &result->results, node) {
+ seq_printf(sf, "%s\n",
+ thread_result_get(result->name, tr));
+ if (tr->type == DMATEST_ET_VERIFY_BUF) {
+ for (i = 0; i < tr->vr->error_count; i++) {
+ seq_printf(sf, "\t%s\n",
+ verify_result_get_one(tr->vr, i));
+ }
+ }
+ }
+ }
+
+ mutex_unlock(&info->results_lock);
+ return 0;
+}
+
+static int dtf_results_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dtf_results_show, inode->i_private);
+}
+
+static const struct file_operations dtf_results_fops = {
+ .open = dtf_results_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dmatest_register_dbgfs(struct dmatest_info *info)
+{
+ struct dentry *d;
+ struct dmatest_params *params = &info->dbgfs_params;
+ int ret = -ENOMEM;
+
+ d = debugfs_create_dir("dmatest", NULL);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+ if (!d)
+ goto err_root;
+
+ info->root = d;
+
+ /* Copy initial values */
+ memcpy(params, &info->params, sizeof(*params));
+
+ /* Test parameters */
+
+ d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->buf_size);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
+ info, &dtf_channel_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
+ info, &dtf_device_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->threads_per_chan);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->max_channels);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->iterations);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->xor_sources);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->pq_sources);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->timeout);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ /* Run or stop threaded test */
+ d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
+ info, &dtf_run_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ /* Results of test in progress */
+ d = debugfs_create_file("results", S_IRUGO, info->root, info,
+ &dtf_results_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ return 0;
+
+err_node:
+ debugfs_remove_recursive(info->root);
+err_root:
+ pr_err("dmatest: Failed to initialize debugfs\n");
+ return ret;
+}
+
+static int __init dmatest_init(void)
+{
+ struct dmatest_info *info = &test_info;
+ struct dmatest_params *params = &info->params;
+ int ret;
+
+ memset(info, 0, sizeof(*info));
+
+ mutex_init(&info->lock);
+ INIT_LIST_HEAD(&info->channels);
+
+ mutex_init(&info->results_lock);
+ INIT_LIST_HEAD(&info->results);
+
+ /* Set default parameters */
+ params->buf_size = test_buf_size;
+ strlcpy(params->channel, test_channel, sizeof(params->channel));
+ strlcpy(params->device, test_device, sizeof(params->device));
+ params->threads_per_chan = threads_per_chan;
+ params->max_channels = max_channels;
+ params->iterations = iterations;
+ params->xor_sources = xor_sources;
+ params->pq_sources = pq_sources;
+ params->timeout = timeout;
+
+ ret = dmatest_register_dbgfs(info);
+ if (ret)
+ return ret;
+
+#ifdef MODULE
+ return 0;
+#else
+ return run_threaded_test(info);
+#endif
+}
+/* when compiled-in wait for drivers to load first */
+late_initcall(dmatest_init);
+
+static void __exit dmatest_exit(void)
+{
+ struct dmatest_info *info = &test_info;
+
+ debugfs_remove_recursive(info->root);
+ stop_threaded_test(info);
+ result_free(info, NULL);
}
module_exit(dmatest_exit);
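
For orientation, the debugfs files registered above supplement the old module-parameter-only workflow: parameters are edited under /sys/kernel/debug/dmatest/, a write of 1 to "run" copies them into the live configuration and restarts the test threads, and "results" exposes the per-thread outcome list. A minimal user-space sketch of that flow follows; the channel name and the exact paths are illustrative assumptions, not mandated by the patch.

/* Illustrative only: exercise the dmatest debugfs interface added above.
 * Assumes debugfs is mounted at /sys/kernel/debug; "dma0chan0" is a
 * made-up channel name.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void put_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return;
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	put_str("/sys/kernel/debug/dmatest/channel", "dma0chan0");
	put_str("/sys/kernel/debug/dmatest/iterations", "100");
	put_str("/sys/kernel/debug/dmatest/run", "1");

	/* a real tool would poll "run" until it reads back 'N' first */
	fd = open("/sys/kernel/debug/dmatest/results", O_RDONLY);
	if (fd >= 0) {
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);
	}
	return 0;
}
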
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 43a5329..2e5deaa 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -25,6 +25,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
#include "dw_dmac_regs.h"
#include "dmaengine.h"
@@ -49,29 +51,22 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
return slave ? slave->src_master : 1;
}
-#define SRC_MASTER 0
-#define DST_MASTER 1
-
-static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
+static inline void dwc_set_masters(struct dw_dma_chan *dwc)
{
- struct dw_dma *dw = to_dw_dma(chan->device);
- struct dw_dma_slave *dws = chan->private;
- unsigned int m;
-
- if (master == SRC_MASTER)
- m = dwc_get_sms(dws);
- else
- m = dwc_get_dms(dws);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_dma_slave *dws = dwc->chan.private;
+ unsigned char mmax = dw->nr_masters - 1;
- return min_t(unsigned int, dw->nr_masters - 1, m);
+ if (dwc->request_line == ~0) {
+ dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
+ dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
+ }
}
#define DWC_DEFAULT_CTLLO(_chan) ({ \
struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
bool _is_slave = is_slave_direction(_dwc->direction); \
- int _dms = dwc_get_master(_chan, DST_MASTER); \
- int _sms = dwc_get_master(_chan, SRC_MASTER); \
u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
DW_DMA_MSIZE_16; \
u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
@@ -81,8 +76,8 @@ static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
| DWC_CTLL_SRC_MSIZE(_smsize) \
| DWC_CTLL_LLP_D_EN \
| DWC_CTLL_LLP_S_EN \
- | DWC_CTLL_DMS(_dms) \
- | DWC_CTLL_SMS(_sms)); \
+ | DWC_CTLL_DMS(_dwc->dst_master) \
+ | DWC_CTLL_SMS(_dwc->src_master)); \
})
/*
@@ -92,13 +87,6 @@ static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
*/
#define NR_DESCS_PER_CHANNEL 64
-static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
-{
- struct dw_dma *dw = to_dw_dma(chan->device);
-
- return dw->data_width[dwc_get_master(chan, master)];
-}
-
/*----------------------------------------------------------------------*/
static struct device *chan2dev(struct dma_chan *chan)
@@ -172,13 +160,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
if (dwc->initialized == true)
return;
- if (dws && dws->cfg_hi == ~0 && dws->cfg_lo == ~0) {
- /* autoconfigure based on request line from DT */
- if (dwc->direction == DMA_MEM_TO_DEV)
- cfghi = DWC_CFGH_DST_PER(dwc->request_line);
- else if (dwc->direction == DMA_DEV_TO_MEM)
- cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
- } else if (dws) {
+ if (dws) {
/*
* We need controller-specific data to set up slave
* transfers.
@@ -189,9 +171,9 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
} else {
if (dwc->direction == DMA_MEM_TO_DEV)
- cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
+ cfghi = DWC_CFGH_DST_PER(dwc->request_line);
else if (dwc->direction == DMA_DEV_TO_MEM)
- cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
+ cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
}
channel_writel(dwc, CFG_LO, cfglo);
@@ -473,16 +455,16 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
(unsigned long long)llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
- /* initial residue value */
+ /* Initial residue value */
dwc->residue = desc->total_len;
- /* check first descriptors addr */
+ /* Check first descriptors addr */
if (desc->txd.phys == llp) {
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
- /* check first descriptors llp */
+ /* Check first descriptors llp */
if (desc->lli.llp == llp) {
/* This one is currently in progress */
dwc->residue -= dwc_get_sent(dwc);
@@ -588,7 +570,7 @@ inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
-/* called with dwc->lock held and all DMAC interrupts disabled */
+/* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
u32 status_err, u32 status_xfer)
{
@@ -626,7 +608,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
dwc_chan_disable(dw, dwc);
- /* make sure DMA does not restart by loading a new list */
+ /* Make sure DMA does not restart by loading a new list */
channel_writel(dwc, LLP, 0);
channel_writel(dwc, CTL_LO, 0);
channel_writel(dwc, CTL_HI, 0);
@@ -745,6 +727,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc;
struct dw_desc *first;
struct dw_desc *prev;
@@ -767,8 +750,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
dwc->direction = DMA_MEM_TO_MEM;
- data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
- dwc_get_data_width(chan, DST_MASTER));
+ data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
+ dw->data_width[dwc->dst_master]);
src_width = dst_width = min_t(unsigned int, data_width,
dwc_fast_fls(src | dest | len));
@@ -826,6 +809,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned long flags, void *context)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
struct dw_desc *prev;
struct dw_desc *first;
@@ -859,7 +843,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
DWC_CTLL_FC(DW_DMA_FC_D_M2P);
- data_width = dwc_get_data_width(chan, SRC_MASTER);
+ data_width = dw->data_width[dwc->src_master];
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
@@ -919,7 +903,7 @@ slave_sg_todev_fill_desc:
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
DWC_CTLL_FC(DW_DMA_FC_D_P2M);
- data_width = dwc_get_data_width(chan, DST_MASTER);
+ data_width = dw->data_width[dwc->dst_master];
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
@@ -1001,13 +985,6 @@ static inline void convert_burst(u32 *maxburst)
*maxburst = 0;
}
-static inline void convert_slave_id(struct dw_dma_chan *dwc)
-{
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-
- dwc->dma_sconfig.slave_id -= dw->request_line_base;
-}
-
static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
@@ -1020,9 +997,12 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
dwc->direction = sconfig->direction;
+ /* Take the request line from slave_id member */
+ if (dwc->request_line == ~0)
+ dwc->request_line = sconfig->slave_id;
+
convert_burst(&dwc->dma_sconfig.src_maxburst);
convert_burst(&dwc->dma_sconfig.dst_maxburst);
- convert_slave_id(dwc);
return 0;
}
@@ -1030,10 +1010,11 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
{
u32 cfglo = channel_readl(dwc, CFG_LO);
+ unsigned int count = 20; /* timeout iterations */
channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
- while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
- cpu_relax();
+ while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
+ udelay(2);
dwc->paused = true;
}
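
The hunk above bounds what used to be an open-ended busy-wait: the FIFO-empty poll now gives up after 20 iterations of udelay(2), roughly 40 µs worst case, instead of spinning forever on a wedged channel, and dwc_chan_pause() still marks the channel paused either way. A generic sketch of the same bounded-poll pattern; the helper name and parameters are illustrative, not part of this patch.

/* Illustrative bounded register poll (hypothetical helper, not in the patch):
 * retry a limited number of times and report the timeout to the caller
 * instead of spinning indefinitely.
 */
static bool poll_reg_set(void __iomem *reg, u32 mask,
			 unsigned int retries, unsigned int delay_us)
{
	while (!(readl(reg) & mask)) {
		if (!retries--)
			return false;	/* timed out */
		udelay(delay_us);
	}
	return true;
}

A stricter caller could translate the false return into -ETIMEDOUT; the pause path above deliberately proceeds regardless.
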
@@ -1169,6 +1150,8 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
+ dwc_set_masters(dwc);
+
spin_lock_irqsave(&dwc->lock, flags);
i = dwc->descs_allocated;
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
@@ -1226,6 +1209,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
dwc->initialized = false;
+ dwc->request_line = ~0;
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1241,42 +1225,36 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
-struct dw_dma_filter_args {
+/*----------------------------------------------------------------------*/
+
+struct dw_dma_of_filter_args {
struct dw_dma *dw;
unsigned int req;
unsigned int src;
unsigned int dst;
};
-static bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
+static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma *dw = to_dw_dma(chan->device);
- struct dw_dma_filter_args *fargs = param;
- struct dw_dma_slave *dws = &dwc->slave;
+ struct dw_dma_of_filter_args *fargs = param;
- /* ensure the device matches our channel */
+ /* Ensure the device matches our channel */
if (chan->device != &fargs->dw->dma)
return false;
- dws->dma_dev = dw->dma.dev;
- dws->cfg_hi = ~0;
- dws->cfg_lo = ~0;
- dws->src_master = fargs->src;
- dws->dst_master = fargs->dst;
-
dwc->request_line = fargs->req;
-
- chan->private = dws;
+ dwc->src_master = fargs->src;
+ dwc->dst_master = fargs->dst;
return true;
}
-static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
+static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
{
struct dw_dma *dw = ofdma->of_dma_data;
- struct dw_dma_filter_args fargs = {
+ struct dw_dma_of_filter_args fargs = {
.dw = dw,
};
dma_cap_mask_t cap;
@@ -1297,8 +1275,48 @@ static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
dma_cap_set(DMA_SLAVE, cap);
/* TODO: there should be a simpler way to do this */
- return dma_request_channel(cap, dw_dma_generic_filter, &fargs);
+ return dma_request_channel(cap, dw_dma_of_filter, &fargs);
+}
+
+#ifdef CONFIG_ACPI
+static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct acpi_dma_spec *dma_spec = param;
+
+ if (chan->device->dev != dma_spec->dev ||
+ chan->chan_id != dma_spec->chan_id)
+ return false;
+
+ dwc->request_line = dma_spec->slave_id;
+ dwc->src_master = dwc_get_sms(NULL);
+ dwc->dst_master = dwc_get_dms(NULL);
+
+ return true;
+}
+
+static void dw_dma_acpi_controller_register(struct dw_dma *dw)
+{
+ struct device *dev = dw->dma.dev;
+ struct acpi_dma_filter_info *info;
+ int ret;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
+
+ dma_cap_zero(info->dma_cap);
+ dma_cap_set(DMA_SLAVE, info->dma_cap);
+ info->filter_fn = dw_dma_acpi_filter;
+
+ ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
+ info);
+ if (ret)
+ dev_err(dev, "could not register acpi_dma_controller\n");
}
+#else /* !CONFIG_ACPI */
+static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
+#endif /* !CONFIG_ACPI */
/* --------------------- Cyclic DMA API extensions -------------------- */
@@ -1322,7 +1340,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
- /* assert channel is idle */
+ /* Assert channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan),
"BUG: Attempted to start non-idle channel\n");
@@ -1334,7 +1352,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
- /* setup DMAC channel registers */
+ /* Setup DMAC channel registers */
channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
channel_writel(dwc, CTL_HI, 0);
@@ -1501,7 +1519,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
last = desc;
}
- /* lets make a cyclic list */
+ /* Let's make a cyclic list */
last->lli.llp = cdesc->desc[0]->txd.phys;
dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
@@ -1636,7 +1654,6 @@ dw_dma_parse_dt(struct platform_device *pdev)
static int dw_probe(struct platform_device *pdev)
{
- const struct platform_device_id *match;
struct dw_dma_platform_data *pdata;
struct resource *io;
struct dw_dma *dw;
@@ -1706,7 +1723,7 @@ static int dw_probe(struct platform_device *pdev)
dw->regs = regs;
- /* get hardware configuration parameters */
+ /* Get hardware configuration parameters */
if (autocfg) {
max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
@@ -1720,18 +1737,13 @@ static int dw_probe(struct platform_device *pdev)
memcpy(dw->data_width, pdata->data_width, 4);
}
- /* Get the base request line if set */
- match = platform_get_device_id(pdev);
- if (match)
- dw->request_line_base = (unsigned int)match->driver_data;
-
/* Calculate all channel mask before DMA setup */
dw->all_chan_mask = (1 << nr_channels) - 1;
- /* force dma off, just in case */
+ /* Force dma off, just in case */
dw_dma_off(dw);
- /* disable BLOCK interrupts as well */
+ /* Disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
@@ -1741,7 +1753,7 @@ static int dw_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dw);
- /* create a pool of consistent memory blocks for hardware descriptors */
+ /* Create a pool of consistent memory blocks for hardware descriptors */
dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
sizeof(struct dw_desc), 4, 0);
if (!dw->desc_pool) {
@@ -1781,8 +1793,9 @@ static int dw_probe(struct platform_device *pdev)
channel_clear_bit(dw, CH_EN, dwc->mask);
dwc->direction = DMA_TRANS_NONE;
+ dwc->request_line = ~0;
- /* hardware configuration */
+ /* Hardware configuration */
if (autocfg) {
unsigned int dwc_params;
@@ -1842,12 +1855,15 @@ static int dw_probe(struct platform_device *pdev)
if (pdev->dev.of_node) {
err = of_dma_controller_register(pdev->dev.of_node,
- dw_dma_xlate, dw);
- if (err && err != -ENODEV)
+ dw_dma_of_xlate, dw);
+ if (err)
dev_err(&pdev->dev,
"could not register of_dma_controller\n");
}
+ if (ACPI_HANDLE(&pdev->dev))
+ dw_dma_acpi_controller_register(dw);
+
return 0;
}
@@ -1912,18 +1928,19 @@ static const struct dev_pm_ops dw_dev_pm_ops = {
};
#ifdef CONFIG_OF
-static const struct of_device_id dw_dma_id_table[] = {
+static const struct of_device_id dw_dma_of_id_table[] = {
{ .compatible = "snps,dma-spear1340" },
{}
};
-MODULE_DEVICE_TABLE(of, dw_dma_id_table);
+MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif
-static const struct platform_device_id dw_dma_ids[] = {
- /* Name, Request Line Base */
- { "INTL9C60", (kernel_ulong_t)16 },
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id dw_dma_acpi_id_table[] = {
+ { "INTL9C60", 0 },
{ }
};
+#endif
static struct platform_driver dw_driver = {
.probe = dw_probe,
@@ -1932,9 +1949,9 @@ static struct platform_driver dw_driver = {
.driver = {
.name = "dw_dmac",
.pm = &dw_dev_pm_ops,
- .of_match_table = of_match_ptr(dw_dma_id_table),
+ .of_match_table = of_match_ptr(dw_dma_of_id_table),
+ .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
},
- .id_table = dw_dma_ids,
};
static int __init dw_init(void)
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 4d02c36..9d41720 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -212,8 +212,11 @@ struct dw_dma_chan {
/* hardware configuration */
unsigned int block_size;
bool nollp;
+
+ /* custom slave configuration */
unsigned int request_line;
- struct dw_dma_slave slave;
+ unsigned char src_master;
+ unsigned char dst_master;
/* configuration passed via DMA_SLAVE_CONFIG */
struct dma_slave_config dma_sconfig;
@@ -247,7 +250,6 @@ struct dw_dma {
/* hardware configuration */
unsigned char nr_masters;
unsigned char data_width[4];
- unsigned int request_line_base;
struct dw_dma_chan chan[0];
};
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 70b8975..f285833 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -859,8 +859,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
- if (imxdmac->sg_list)
- kfree(imxdmac->sg_list);
+ kfree(imxdmac->sg_list);
imxdmac->sg_list = kcalloc(periods + 1,
sizeof(struct scatterlist), GFP_KERNEL);
@@ -1145,7 +1144,7 @@ err:
return ret;
}
-static int __exit imxdma_remove(struct platform_device *pdev)
+static int imxdma_remove(struct platform_device *pdev)
{
struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
@@ -1162,7 +1161,7 @@ static struct platform_driver imxdma_driver = {
.name = "imx-dma",
},
.id_table = imx_dma_devtype,
- .remove = __exit_p(imxdma_remove),
+ .remove = imxdma_remove,
};
static int __init imxdma_module_init(void)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f082aa3..092867b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1462,7 +1462,7 @@ err_irq:
return ret;
}
-static int __exit sdma_remove(struct platform_device *pdev)
+static int sdma_remove(struct platform_device *pdev)
{
return -EBUSY;
}
@@ -1473,7 +1473,7 @@ static struct platform_driver sdma_driver = {
.of_match_table = sdma_dt_ids,
},
.id_table = sdma_devtypes,
- .remove = __exit_p(sdma_remove),
+ .remove = sdma_remove,
};
static int __init sdma_module_init(void)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1879a59..17a2393 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -892,7 +892,7 @@ MODULE_PARM_DESC(ioat_interrupt_style,
* ioat_dma_setup_interrupts - setup interrupt handler
* @device: ioat device
*/
-static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
struct ioat_chan_common *chan;
struct pci_dev *pdev = device->pdev;
@@ -941,6 +941,7 @@ msix:
}
}
intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
+ device->irq_mode = IOAT_MSIX;
goto done;
msix_single_vector:
@@ -956,6 +957,7 @@ msix_single_vector:
pci_disable_msix(pdev);
goto msi;
}
+ device->irq_mode = IOAT_MSIX_SINGLE;
goto done;
msi:
@@ -969,6 +971,7 @@ msi:
pci_disable_msi(pdev);
goto intx;
}
+ device->irq_mode = IOAT_MSI;
goto done;
intx:
@@ -977,6 +980,7 @@ intx:
if (err)
goto err_no_irq;
+ device->irq_mode = IOAT_INTX;
done:
if (device->intr_quirk)
device->intr_quirk(device);
@@ -987,9 +991,11 @@ done:
err_no_irq:
/* Disable all interrupt generation */
writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+ device->irq_mode = IOAT_NOIRQ;
dev_err(dev, "no usable interrupts\n");
return err;
}
+EXPORT_SYMBOL(ioat_dma_setup_interrupts);
static void ioat_disable_interrupts(struct ioatdma_device *device)
{
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 53a4cbb..54fb7b9 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -39,6 +39,7 @@
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
+#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev)
#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
@@ -48,6 +49,14 @@
*/
#define NULL_DESC_BUFFER_SIZE 1
+enum ioat_irq_mode {
+ IOAT_NOIRQ = 0,
+ IOAT_MSIX,
+ IOAT_MSIX_SINGLE,
+ IOAT_MSI,
+ IOAT_INTX
+};
+
/**
* struct ioatdma_device - internal representation of a IOAT device
* @pdev: PCI-Express device
@@ -72,11 +81,16 @@ struct ioatdma_device {
void __iomem *reg_base;
struct pci_pool *dma_pool;
struct pci_pool *completion_pool;
+#define MAX_SED_POOLS 5
+ struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
+ struct kmem_cache *sed_pool;
struct dma_device common;
u8 version;
struct msix_entry msix_entries[4];
struct ioat_chan_common *idx[4];
struct dca_provider *dca;
+ enum ioat_irq_mode irq_mode;
+ u32 cap;
void (*intr_quirk)(struct ioatdma_device *device);
int (*enumerate_channels)(struct ioatdma_device *device);
int (*reset_hw)(struct ioat_chan_common *chan);
@@ -131,6 +145,20 @@ struct ioat_dma_chan {
u16 active;
};
+/**
+ * struct ioat_sed_ent - wrapper around super extended hardware descriptor
+ * @hw: hardware SED
+ * @sed_dma: dma address for the SED
+ * @list: list member
+ * @parent: point to the dma descriptor that's the parent
+ */
+struct ioat_sed_ent {
+ struct ioat_sed_raw_descriptor *hw;
+ dma_addr_t dma;
+ struct ioat_ring_ent *parent;
+ unsigned int hw_pool;
+};
+
static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
{
return container_of(c, struct ioat_chan_common, common);
@@ -179,7 +207,7 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
struct device *dev = to_dev(chan);
dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
- " ctl: %#x (op: %d int_en: %d compl: %d)\n", id,
+ " ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
(unsigned long long) tx->phys,
(unsigned long long) hw->next, tx->cookie, tx->flags,
hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
@@ -201,7 +229,7 @@ ioat_chan_by_index(struct ioatdma_device *device, int index)
return device->idx[index];
}
-static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+static inline u64 ioat_chansts_32(struct ioat_chan_common *chan)
{
u8 ver = chan->device->version;
u64 status;
@@ -218,6 +246,26 @@ static inline u64 ioat_chansts(struct ioat_chan_common *chan)
return status;
}
+#if BITS_PER_LONG == 64
+
+static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+{
+ u8 ver = chan->device->version;
+ u64 status;
+
+ /* With IOAT v3.3 the status register is 64bit. */
+ if (ver >= IOAT_VER_3_3)
+ status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
+ else
+ status = ioat_chansts_32(chan);
+
+ return status;
+}
+
+#else
+#define ioat_chansts ioat_chansts_32
+#endif
+
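
The split above exists because readq() is only guaranteed to be available as a single access on 64-bit builds; 32-bit kernels keep composing the status from the existing ioat_chansts_32() readl() path. As a general aside, splitting a 64-bit read is only safe when the register cannot change between the two halves; a common defensive pattern, shown purely for illustration and not something this patch adds, is to re-read the high word until it is stable.

/* Illustration only (not part of this driver): loss-free composition of a
 * 64-bit value from two 32-bit reads when the hardware may update the
 * register between the accesses.
 */
static u64 read_hilo64(void __iomem *base)
{
	u32 hi, lo;

	do {
		hi = readl(base + 4);
		lo = readl(base);
	} while (hi != readl(base + 4));

	return ((u64)hi << 32) | lo;
}
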
static inline void ioat_start(struct ioat_chan_common *chan)
{
u8 ver = chan->device->version;
@@ -321,6 +369,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
dma_addr_t *phys_complete);
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
+int ioat_dma_setup_interrupts(struct ioatdma_device *device);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index e100f64..29bf944 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -137,6 +137,7 @@ struct ioat_ring_ent {
#ifdef DEBUG
int id;
#endif
+ struct ioat_sed_ent *sed;
};
static inline struct ioat_ring_ent *
@@ -157,6 +158,7 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
+void ioat3_dma_remove(struct ioatdma_device *dev);
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index e8336cc..ca6ea9b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -55,7 +55,7 @@
/*
* Support routines for v3+ hardware
*/
-
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
@@ -70,6 +70,10 @@
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
+#define ndest_to_sw(x) ((x) + 1)
+#define ndest_to_hw(x) ((x) - 1)
+#define src16_cnt_to_sw(x) ((x) + 9)
+#define src16_cnt_to_hw(x) ((x) - 9)
/* provide a lookup table for setting the source address in the base or
* extended descriptor of an xor or pq descriptor
@@ -77,7 +81,20 @@
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
+static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6 };
+
+/*
+ * technically sources 1 and 2 do not require SED, but the op will have
+ * at least 9 descriptors so that's irrelevant.
+ */
+static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1 };
+
+static void ioat3_eh(struct ioat2_dma_chan *ioat);
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
@@ -101,6 +118,13 @@ static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
return raw->field[pq_idx_to_field[idx]];
}
+static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
+{
+ struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+ return raw->field[pq16_idx_to_field[idx]];
+}
+
static void pq_set_src(struct ioat_raw_descriptor *descs[2],
dma_addr_t addr, u32 offset, u8 coef, int idx)
{
@@ -111,6 +135,167 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2],
pq->coef[idx] = coef;
}
+static int sed_get_pq16_pool_idx(int src_cnt)
+{
+
+ return pq16_idx_to_sed[src_cnt];
+}
+
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_ivb_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
+ return true;
+ default:
+ return false;
+ }
+
+}
+
+static bool is_hsw_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
+ return true;
+ default:
+ return false;
+ }
+
+}
+
+static bool is_xeon_cb32(struct pci_dev *pdev)
+{
+ return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
+ is_hsw_ioat(pdev);
+}
+
+static bool is_bwd_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_bwd_noraid(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+ return true;
+ default:
+ return false;
+ }
+
+}
+
+static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
+ dma_addr_t addr, u32 offset, u8 coef, int idx)
+{
+ struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
+ struct ioat_pq16a_descriptor *pq16 =
+ (struct ioat_pq16a_descriptor *)desc[1];
+ struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+ raw->field[pq16_idx_to_field[idx]] = addr + offset;
+
+ if (idx < 8)
+ pq->coef[idx] = coef;
+ else
+ pq16->coef[idx - 8] = coef;
+}
+
+static struct ioat_sed_ent *
+ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
+{
+ struct ioat_sed_ent *sed;
+ gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
+
+ sed = kmem_cache_alloc(device->sed_pool, flags);
+ if (!sed)
+ return NULL;
+
+ sed->hw_pool = hw_pool;
+ sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
+ flags, &sed->dma);
+ if (!sed->hw) {
+ kmem_cache_free(device->sed_pool, sed);
+ return NULL;
+ }
+
+ return sed;
+}
+
+static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
+{
+ if (!sed)
+ return;
+
+ dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
+ kmem_cache_free(device->sed_pool, sed);
+}
+
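
ioat3_alloc_sed() and ioat3_free_sed() rely on two allocators created elsewhere in this series: a kmem_cache backing the struct ioat_sed_ent wrappers (device->sed_pool) and one coherent DMA pool per extended-descriptor size class (device->sed_hw_pool[], MAX_SED_POOLS entries, see the dma.h hunk earlier in this diff). A sketch of what that setup plausibly looks like; the names, sizes, and alignment below are assumptions, not copied from the patch.

/* Illustrative setup for the SED allocators used above (assumed shapes;
 * sizes and alignment are placeholders, and error unwinding of partially
 * created pools is omitted for brevity).
 */
static int ioat3_sed_pools_init(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	int i;

	device->sed_pool = kmem_cache_create("ioat_sed",
					     sizeof(struct ioat_sed_ent),
					     0, 0, NULL);
	if (!device->sed_pool)
		return -ENOMEM;

	for (i = 0; i < MAX_SED_POOLS; i++) {
		/* one hardware pool per super-extended descriptor size class */
		device->sed_hw_pool[i] = dma_pool_create("ioat_sed_hw",
							 &pdev->dev,
							 64 * (i + 1), 64, 0);
		if (!device->sed_hw_pool[i])
			return -ENOMEM;
	}
	return 0;
}
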
static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
struct ioat_ring_ent *desc, int idx)
{
@@ -223,6 +408,54 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
}
break;
}
+ case IOAT_OP_PQ_16S:
+ case IOAT_OP_PQ_VAL_16S: {
+ struct ioat_pq_descriptor *pq = desc->pq;
+ int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
+ struct ioat_raw_descriptor *descs[4];
+ int i;
+
+ /* in the 'continue' case don't unmap the dests as sources */
+ if (dmaf_p_disabled_continue(flags))
+ src_cnt--;
+ else if (dmaf_continue(flags))
+ src_cnt -= 3;
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ descs[0] = (struct ioat_raw_descriptor *)pq;
+ descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
+ descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
+ for (i = 0; i < src_cnt; i++) {
+ dma_addr_t src = pq16_get_src(descs, i);
+
+ ioat_unmap(pdev, src - offset, len,
+ PCI_DMA_TODEVICE, flags, 0);
+ }
+
+ /* the dests are sources in pq validate operations */
+ if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
+ if (!(flags & DMA_PREP_PQ_DISABLE_P))
+ ioat_unmap(pdev, pq->p_addr - offset,
+ len, PCI_DMA_TODEVICE,
+ flags, 0);
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ ioat_unmap(pdev, pq->q_addr - offset,
+ len, PCI_DMA_TODEVICE,
+ flags, 0);
+ break;
+ }
+ }
+
+ if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (!(flags & DMA_PREP_PQ_DISABLE_P))
+ ioat_unmap(pdev, pq->p_addr - offset, len,
+ PCI_DMA_BIDIRECTIONAL, flags, 1);
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ ioat_unmap(pdev, pq->q_addr - offset, len,
+ PCI_DMA_BIDIRECTIONAL, flags, 1);
+ }
+ break;
+ }
default:
dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
__func__, desc->hw->ctl_f.op);
@@ -250,6 +483,63 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
return false;
}
+static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
+{
+ u64 phys_complete;
+ u64 completion;
+
+ completion = *chan->completion;
+ phys_complete = ioat_chansts_to_addr(completion);
+
+ dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
+ (unsigned long long) phys_complete);
+
+ return phys_complete;
+}
+
+static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
+ u64 *phys_complete)
+{
+ *phys_complete = ioat3_get_current_completion(chan);
+ if (*phys_complete == chan->last_completion)
+ return false;
+
+ clear_bit(IOAT_COMPLETION_ACK, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+ return true;
+}
+
+static void
+desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
+{
+ struct ioat_dma_descriptor *hw = desc->hw;
+
+ switch (hw->ctl_f.op) {
+ case IOAT_OP_PQ_VAL:
+ case IOAT_OP_PQ_VAL_16S:
+ {
+ struct ioat_pq_descriptor *pq = desc->pq;
+
+ /* check if there's error written */
+ if (!pq->dwbes_f.wbes)
+ return;
+
+ /* need to set a chanerr var for checking to clear later */
+
+ if (pq->dwbes_f.p_val_err)
+ *desc->result |= SUM_CHECK_P_RESULT;
+
+ if (pq->dwbes_f.q_val_err)
+ *desc->result |= SUM_CHECK_Q_RESULT;
+
+ return;
+ }
+ default:
+ return;
+ }
+}
+
/**
* __cleanup - reclaim used descriptors
* @ioat: channel (ring) to clean
@@ -260,6 +550,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
+ struct ioatdma_device *device = chan->device;
struct ioat_ring_ent *desc;
bool seen_current = false;
int idx = ioat->tail, i;
@@ -268,6 +559,16 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
+ /*
+ * At restart of the channel, the completion address and the
+ * channel status will be 0 due to starting a new chain. Since
+ * it's a new chain and the first descriptor "fails", there is
+ * nothing to clean up. We do not want to reap the entire submitted
+ * chain due to this 0 address value and then BUG.
+ */
+ if (!phys_complete)
+ return;
+
active = ioat2_ring_active(ioat);
for (i = 0; i < active && !seen_current; i++) {
struct dma_async_tx_descriptor *tx;
@@ -276,6 +577,11 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
desc = ioat2_get_ring_ent(ioat, idx + i);
dump_desc_dbg(ioat, desc);
+
+ /* set err stat if we are using dwbes */
+ if (device->cap & IOAT_CAP_DWBES)
+ desc_get_errstat(ioat, desc);
+
tx = &desc->txd;
if (tx->cookie) {
dma_cookie_complete(tx);
@@ -294,6 +600,12 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
BUG_ON(i + 1 >= active);
i++;
}
+
+ /* cleanup super extended descriptors */
+ if (desc->sed) {
+ ioat3_free_sed(device, desc->sed);
+ desc->sed = NULL;
+ }
}
smp_mb(); /* finish all descriptor reads before incrementing tail */
ioat->tail = idx + i;
@@ -314,11 +626,22 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- dma_addr_t phys_complete;
+ u64 phys_complete;
spin_lock_bh(&chan->cleanup_lock);
- if (ioat_cleanup_preamble(chan, &phys_complete))
+
+ if (ioat3_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
+
+ if (is_ioat_halted(*chan->completion)) {
+ u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+
+ if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
+ mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+ ioat3_eh(ioat);
+ }
+ }
+
spin_unlock_bh(&chan->cleanup_lock);
}
@@ -333,15 +656,78 @@ static void ioat3_cleanup_event(unsigned long data)
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- dma_addr_t phys_complete;
+ u64 phys_complete;
ioat2_quiesce(chan, 0);
- if (ioat_cleanup_preamble(chan, &phys_complete))
+ if (ioat3_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
__ioat2_restart_chan(ioat);
}
+static void ioat3_eh(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ struct pci_dev *pdev = to_pdev(chan);
+ struct ioat_dma_descriptor *hw;
+ u64 phys_complete;
+ struct ioat_ring_ent *desc;
+ u32 err_handled = 0;
+ u32 chanerr_int;
+ u32 chanerr;
+
+ /* cleanup so tail points to descriptor that caused the error */
+ if (ioat3_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
+
+ dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
+ __func__, chanerr, chanerr_int);
+
+ desc = ioat2_get_ring_ent(ioat, ioat->tail);
+ hw = desc->hw;
+ dump_desc_dbg(ioat, desc);
+
+ switch (hw->ctl_f.op) {
+ case IOAT_OP_XOR_VAL:
+ if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+ *desc->result |= SUM_CHECK_P_RESULT;
+ err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+ }
+ break;
+ case IOAT_OP_PQ_VAL:
+ case IOAT_OP_PQ_VAL_16S:
+ if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+ *desc->result |= SUM_CHECK_P_RESULT;
+ err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+ }
+ if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
+ *desc->result |= SUM_CHECK_Q_RESULT;
+ err_handled |= IOAT_CHANERR_XOR_Q_ERR;
+ }
+ break;
+ }
+
+ /* fault on unhandled error or spurious halt */
+ if (chanerr ^ err_handled || chanerr == 0) {
+ dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
+ __func__, chanerr, err_handled);
+ BUG();
+ }
+
+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+
+ /* mark faulting descriptor as complete */
+ *chan->completion = desc->txd.phys;
+
+ spin_lock_bh(&ioat->prep_lock);
+ ioat3_restart_channel(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+}
+
static void check_active(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
@@ -605,7 +991,8 @@ dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct
int i;
dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
- " sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n",
+ " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+ " src_cnt: %d)\n",
desc_id(desc), (unsigned long long) desc->txd.phys,
(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
@@ -617,6 +1004,42 @@ dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct
(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
+ dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
+}
+
+static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
+ struct ioat_ring_ent *desc)
+{
+ struct device *dev = to_dev(&ioat->base);
+ struct ioat_pq_descriptor *pq = desc->pq;
+ struct ioat_raw_descriptor *descs[] = { (void *)pq,
+ (void *)pq,
+ (void *)pq };
+ int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
+ int i;
+
+ if (desc->sed) {
+ descs[1] = (void *)desc->sed->hw;
+ descs[2] = (void *)desc->sed->hw + 64;
+ }
+
+ dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
+ " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+ " src_cnt: %d)\n",
+ desc_id(desc), (unsigned long long) desc->txd.phys,
+ (unsigned long long) pq->next,
+ desc->txd.flags, pq->size, pq->ctl,
+ pq->ctl_f.op, pq->ctl_f.int_en,
+ pq->ctl_f.compl_write,
+ pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
+ pq->ctl_f.src_cnt);
+ for (i = 0; i < src_cnt; i++) {
+ dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
+ (unsigned long long) pq16_get_src(descs, i),
+ pq->coef[i]);
+ }
+ dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
+ dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}
static struct dma_async_tx_descriptor *
@@ -627,6 +1050,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
+ struct ioatdma_device *device = chan->device;
struct ioat_ring_ent *compl_desc;
struct ioat_ring_ent *desc;
struct ioat_ring_ent *ext;
@@ -637,6 +1061,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
u32 offset = 0;
u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
int i, s, idx, with_ext, num_descs;
+ int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
dev_dbg(to_dev(chan), "%s\n", __func__);
/* the engine requires at least two sources (we provide
@@ -662,7 +1087,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
* order.
*/
if (likely(num_descs) &&
- ioat2_check_space_lock(ioat, num_descs+1) == 0)
+ ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
idx = ioat->head;
else
return NULL;
@@ -700,6 +1125,9 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
pq->q_addr = dst[1] + offset;
pq->ctl = 0;
pq->ctl_f.op = op;
+ /* we turn on descriptor write back error status */
+ if (device->cap & IOAT_CAP_DWBES)
+ pq->ctl_f.wb_en = result ? 1 : 0;
pq->ctl_f.src_cnt = src_cnt_to_hw(s);
pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
@@ -716,26 +1144,140 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
dump_pq_desc_dbg(ioat, desc, ext);
- /* completion descriptor carries interrupt bit */
- compl_desc = ioat2_get_ring_ent(ioat, idx + i);
- compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
- hw = compl_desc->hw;
- hw->ctl = 0;
- hw->ctl_f.null = 1;
- hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
- hw->ctl_f.compl_write = 1;
- hw->size = NULL_DESC_BUFFER_SIZE;
- dump_desc_dbg(ioat, compl_desc);
+ if (!cb32) {
+ pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ pq->ctl_f.compl_write = 1;
+ compl_desc = desc;
+ } else {
+ /* completion descriptor carries interrupt bit */
+ compl_desc = ioat2_get_ring_ent(ioat, idx + i);
+ compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+ hw = compl_desc->hw;
+ hw->ctl = 0;
+ hw->ctl_f.null = 1;
+ hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ hw->ctl_f.compl_write = 1;
+ hw->size = NULL_DESC_BUFFER_SIZE;
+ dump_desc_dbg(ioat, compl_desc);
+ }
+
/* we leave the channel locked to ensure in order submission */
return &compl_desc->txd;
}
static struct dma_async_tx_descriptor *
+__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
+ const dma_addr_t *dst, const dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, unsigned long flags)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ struct ioat_chan_common *chan = &ioat->base;
+ struct ioatdma_device *device = chan->device;
+ struct ioat_ring_ent *desc;
+ size_t total_len = len;
+ struct ioat_pq_descriptor *pq;
+ u32 offset = 0;
+ u8 op;
+ int i, s, idx, num_descs;
+
+ /* this function only handles src_cnt 9 - 16 */
+ BUG_ON(src_cnt < 9);
+
+ /* this function is only called with 9-16 sources */
+ op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
+
+ dev_dbg(to_dev(chan), "%s\n", __func__);
+
+ num_descs = ioat2_xferlen_to_descs(ioat, len);
+
+ /*
+ * 16 source pq is only available on cb3.3 and has no completion
+ * write hw bug.
+ */
+ if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
+ idx = ioat->head;
+ else
+ return NULL;
+
+ i = 0;
+
+ do {
+ struct ioat_raw_descriptor *descs[4];
+ size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
+
+ desc = ioat2_get_ring_ent(ioat, idx + i);
+ pq = desc->pq;
+
+ descs[0] = (struct ioat_raw_descriptor *) pq;
+
+ desc->sed = ioat3_alloc_sed(device,
+ sed_get_pq16_pool_idx(src_cnt));
+ if (!desc->sed) {
+ dev_err(to_dev(chan),
+ "%s: no free sed entries\n", __func__);
+ return NULL;
+ }
+
+ pq->sed_addr = desc->sed->dma;
+ desc->sed->parent = desc;
+
+ descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
+ descs[2] = (void *)descs[1] + 64;
+
+ for (s = 0; s < src_cnt; s++)
+ pq16_set_src(descs, src[s], offset, scf[s], s);
+
+ /* see the comment for dma_maxpq in include/linux/dmaengine.h */
+ if (dmaf_p_disabled_continue(flags))
+ pq16_set_src(descs, dst[1], offset, 1, s++);
+ else if (dmaf_continue(flags)) {
+ pq16_set_src(descs, dst[0], offset, 0, s++);
+ pq16_set_src(descs, dst[1], offset, 1, s++);
+ pq16_set_src(descs, dst[1], offset, 0, s++);
+ }
+
+ pq->size = xfer_size;
+ pq->p_addr = dst[0] + offset;
+ pq->q_addr = dst[1] + offset;
+ pq->ctl = 0;
+ pq->ctl_f.op = op;
+ pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
+ /* we turn on descriptor write back error status */
+ if (device->cap & IOAT_CAP_DWBES)
+ pq->ctl_f.wb_en = result ? 1 : 0;
+ pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
+ pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
+
+ len -= xfer_size;
+ offset += xfer_size;
+ } while (++i < num_descs);
+
+ /* last pq descriptor carries the unmap parameters and fence bit */
+ desc->txd.flags = flags;
+ desc->len = total_len;
+ if (result)
+ desc->result = result;
+ pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+
+ /* with cb3.3 we should be able to do completion w/o a null desc */
+ pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ pq->ctl_f.compl_write = 1;
+
+ dump_pq16_desc_dbg(ioat, desc);
+
+ /* we leave the channel locked to ensure in order submission */
+ return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
unsigned long flags)
{
+ struct dma_device *dma = chan->device;
+
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
dst[0] = dst[1];
@@ -755,11 +1297,20 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
single_source_coef[0] = scf[0];
single_source_coef[1] = 0;
- return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
- single_source_coef, len, flags);
- } else
- return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf,
- len, flags);
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
+ 2, single_source_coef, len,
+ flags) :
+ __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
+ single_source_coef, len, flags);
+
+ } else {
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
+ scf, len, flags) :
+ __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
+ scf, len, flags);
+ }
}
struct dma_async_tx_descriptor *
@@ -767,6 +1318,8 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags)
{
+ struct dma_device *dma = chan->device;
+
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
pq[0] = pq[1];
@@ -778,14 +1331,18 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
*/
*pqres = 0;
- return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
- flags);
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
+ flags) :
+ __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
+ flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
+ struct dma_device *dma = chan->device;
unsigned char scf[src_cnt];
dma_addr_t pq[2];
@@ -794,8 +1351,11 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = dst; /* specify valid address for disabled result */
- return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
- flags);
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
+ flags) :
+ __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
+ flags);
}
struct dma_async_tx_descriptor *
@@ -803,6 +1363,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
unsigned int src_cnt, size_t len,
enum sum_check_flags *result, unsigned long flags)
{
+ struct dma_device *dma = chan->device;
unsigned char scf[src_cnt];
dma_addr_t pq[2];
@@ -816,8 +1377,12 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = pq[0]; /* specify valid address for disabled result */
- return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
- len, flags);
+
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
+ scf, len, flags) :
+ __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
+ scf, len, flags);
}
static struct dma_async_tx_descriptor *
@@ -1167,6 +1732,56 @@ static int ioat3_dma_self_test(struct ioatdma_device *device)
return 0;
}
+static int ioat3_irq_reinit(struct ioatdma_device *device)
+{
+ int msixcnt = device->common.chancnt;
+ struct pci_dev *pdev = device->pdev;
+ int i;
+ struct msix_entry *msix;
+ struct ioat_chan_common *chan;
+ int err = 0;
+
+ switch (device->irq_mode) {
+ case IOAT_MSIX:
+
+ for (i = 0; i < msixcnt; i++) {
+ msix = &device->msix_entries[i];
+ chan = ioat_chan_by_index(device, i);
+ devm_free_irq(&pdev->dev, msix->vector, chan);
+ }
+
+ pci_disable_msix(pdev);
+ break;
+
+ case IOAT_MSIX_SINGLE:
+ msix = &device->msix_entries[0];
+ chan = ioat_chan_by_index(device, 0);
+ devm_free_irq(&pdev->dev, msix->vector, chan);
+ pci_disable_msix(pdev);
+ break;
+
+ case IOAT_MSI:
+ chan = ioat_chan_by_index(device, 0);
+ devm_free_irq(&pdev->dev, pdev->irq, chan);
+ pci_disable_msi(pdev);
+ break;
+
+ case IOAT_INTX:
+ chan = ioat_chan_by_index(device, 0);
+ devm_free_irq(&pdev->dev, pdev->irq, chan);
+ break;
+
+ default:
+ return 0;
+ }
+
+ device->irq_mode = IOAT_NOIRQ;
+
+ err = ioat_dma_setup_interrupts(device);
+
+ return err;
+}
+
static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
/* throw away whatever the channel was doing and get it
@@ -1183,80 +1798,65 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
- /* clear any pending errors */
- err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+ if (device->version < IOAT_VER_3_3) {
+ /* clear any pending errors */
+ err = pci_read_config_dword(pdev,
+ IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+ if (err) {
+ dev_err(&pdev->dev,
+ "channel error register unreachable\n");
+ return err;
+ }
+ pci_write_config_dword(pdev,
+ IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+ /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+ * (workaround for spurious config parity error after restart)
+ */
+ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
+ pci_write_config_dword(pdev,
+ IOAT_PCI_DMAUNCERRSTS_OFFSET,
+ 0x10);
+ }
+ }
+
+ err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
if (err) {
- dev_err(&pdev->dev, "channel error register unreachable\n");
+ dev_err(&pdev->dev, "Failed to reset!\n");
return err;
}
- pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
- /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
- * (workaround for spurious config parity error after restart)
- */
- pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
- if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
- pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+ if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
+ err = ioat3_irq_reinit(device);
- return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+ return err;
}
-static bool is_jf_ioat(struct pci_dev *pdev)
+static void ioat3_intr_quirk(struct ioatdma_device *device)
{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
- return true;
- default:
- return false;
- }
-}
+ struct dma_device *dma;
+ struct dma_chan *c;
+ struct ioat_chan_common *chan;
+ u32 errmask;
-static bool is_snb_ioat(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
- return true;
- default:
- return false;
- }
-}
+ dma = &device->common;
-static bool is_ivb_ioat(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
- return true;
- default:
- return false;
+ /*
+ * if we have descriptor write back error status, we mask the
+ * error interrupts
+ */
+ if (device->cap & IOAT_CAP_DWBES) {
+ list_for_each_entry(c, &dma->channels, device_node) {
+ chan = to_chan_common(c);
+ errmask = readl(chan->reg_base +
+ IOAT_CHANERR_MASK_OFFSET);
+ errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
+ IOAT_CHANERR_XOR_Q_ERR;
+ writel(errmask, chan->reg_base +
+ IOAT_CHANERR_MASK_OFFSET);
+ }
}
-
}
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
@@ -1268,30 +1868,33 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
struct ioat_chan_common *chan;
bool is_raid_device = false;
int err;
- u32 cap;
device->enumerate_channels = ioat2_enumerate_channels;
device->reset_hw = ioat3_reset_hw;
device->self_test = ioat3_dma_self_test;
+ device->intr_quirk = ioat3_intr_quirk;
dma = &device->common;
dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev))
+ if (is_xeon_cb32(pdev))
dma->copy_align = 6;
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
- cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+ device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+
+ if (is_bwd_noraid(pdev))
+ device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
/* dca is incompatible with raid operations */
- if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
- cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+ if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+ device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
- if (cap & IOAT_CAP_XOR) {
+ if (device->cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
dma->xor_align = 6;
@@ -1302,53 +1905,86 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
}
- if (cap & IOAT_CAP_PQ) {
+
+ if (device->cap & IOAT_CAP_PQ) {
is_raid_device = true;
- dma_set_maxpq(dma, 8, 0);
- dma->pq_align = 6;
- dma_cap_set(DMA_PQ, dma->cap_mask);
dma->device_prep_dma_pq = ioat3_prep_pq;
-
- dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
+ dma_cap_set(DMA_PQ, dma->cap_mask);
+ dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
- if (!(cap & IOAT_CAP_XOR)) {
- dma->max_xor = 8;
- dma->xor_align = 6;
+ if (device->cap & IOAT_CAP_RAID16SS) {
+ dma_set_maxpq(dma, 16, 0);
+ dma->pq_align = 0;
+ } else {
+ dma_set_maxpq(dma, 8, 0);
+ if (is_xeon_cb32(pdev))
+ dma->pq_align = 6;
+ else
+ dma->pq_align = 0;
+ }
- dma_cap_set(DMA_XOR, dma->cap_mask);
+ if (!(device->cap & IOAT_CAP_XOR)) {
dma->device_prep_dma_xor = ioat3_prep_pqxor;
-
- dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
+ dma_cap_set(DMA_XOR, dma->cap_mask);
+ dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+
+ if (device->cap & IOAT_CAP_RAID16SS) {
+ dma->max_xor = 16;
+ dma->xor_align = 0;
+ } else {
+ dma->max_xor = 8;
+ if (is_xeon_cb32(pdev))
+ dma->xor_align = 6;
+ else
+ dma->xor_align = 0;
+ }
}
}
- if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
+
+ if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) {
dma_cap_set(DMA_MEMSET, dma->cap_mask);
dma->device_prep_dma_memset = ioat3_prep_memset_lock;
}
- if (is_raid_device) {
- dma->device_tx_status = ioat3_tx_status;
- device->cleanup_fn = ioat3_cleanup_event;
- device->timer_fn = ioat3_timer_event;
- } else {
- dma->device_tx_status = ioat_dma_tx_status;
- device->cleanup_fn = ioat2_cleanup_event;
- device->timer_fn = ioat2_timer_event;
+ dma->device_tx_status = ioat3_tx_status;
+ device->cleanup_fn = ioat3_cleanup_event;
+ device->timer_fn = ioat3_timer_event;
+
+ if (is_xeon_cb32(pdev)) {
+ dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
+ dma->device_prep_dma_xor_val = NULL;
+
+ dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
+ dma->device_prep_dma_pq_val = NULL;
}
- #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
- dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
- dma->device_prep_dma_pq_val = NULL;
- #endif
+ /* starting with CB3.3 super extended descriptors are supported */
+ if (device->cap & IOAT_CAP_RAID16SS) {
+ char pool_name[14];
+ int i;
+
+ /* allocate sw descriptor pool for SED */
+ device->sed_pool = kmem_cache_create("ioat_sed",
+ sizeof(struct ioat_sed_ent), 0, 0, NULL);
+ if (!device->sed_pool)
+ return -ENOMEM;
+
+ for (i = 0; i < MAX_SED_POOLS; i++) {
+ snprintf(pool_name, 14, "ioat_hw%d_sed", i);
- #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
- dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
- dma->device_prep_dma_xor_val = NULL;
- #endif
+ /* allocate SED DMA pool */
+ device->sed_hw_pool[i] = dma_pool_create(pool_name,
+ &pdev->dev,
+ SED_SIZE * (i + 1), 64, 0);
+ if (!device->sed_hw_pool[i])
+ goto sed_pool_cleanup;
+
+ }
+ }
err = ioat_probe(device);
if (err)
@@ -1371,4 +2007,28 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
device->dca = ioat3_dca_init(pdev, device->reg_base);
return 0;
+
+sed_pool_cleanup:
+ if (device->sed_pool) {
+ int i;
+ kmem_cache_destroy(device->sed_pool);
+
+ for (i = 0; i < MAX_SED_POOLS; i++)
+ if (device->sed_hw_pool[i])
+ dma_pool_destroy(device->sed_hw_pool[i]);
+ }
+
+ return -ENOMEM;
+}
+
+void ioat3_dma_remove(struct ioatdma_device *device)
+{
+ if (device->sed_pool) {
+ int i;
+ kmem_cache_destroy(device->sed_pool);
+
+ for (i = 0; i < MAX_SED_POOLS; i++)
+ if (device->sed_hw_pool[i])
+ dma_pool_destroy(device->sed_hw_pool[i]);
+ }
}
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 7cb74c6..5ee57d4 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -30,11 +30,6 @@
#define IOAT_PCI_DID_SCNB 0x65FF
#define IOAT_PCI_DID_SNB 0x402F
-#define IOAT_VER_1_2 0x12 /* Version 1.2 */
-#define IOAT_VER_2_0 0x20 /* Version 2.0 */
-#define IOAT_VER_3_0 0x30 /* Version 3.0 */
-#define IOAT_VER_3_2 0x32 /* Version 3.2 */
-
#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
@@ -46,6 +41,29 @@
#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e
#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW0 0x2f20
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW1 0x2f21
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW2 0x2f22
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW3 0x2f23
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW4 0x2f24
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW5 0x2f25
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW6 0x2f26
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW7 0x2f27
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW8 0x2f2e
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW9 0x2f2f
+
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD0 0x0C50
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD1 0x0C51
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD2 0x0C52
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD3 0x0C53
+
+#define IOAT_VER_1_2 0x12 /* Version 1.2 */
+#define IOAT_VER_2_0 0x20 /* Version 2.0 */
+#define IOAT_VER_3_0 0x30 /* Version 3.0 */
+#define IOAT_VER_3_2 0x32 /* Version 3.2 */
+#define IOAT_VER_3_3 0x33 /* Version 3.3 */
+
+
int system_has_dca_enabled(struct pci_dev *pdev);
struct ioat_dma_descriptor {
@@ -147,7 +165,17 @@ struct ioat_xor_ext_descriptor {
};
struct ioat_pq_descriptor {
- uint32_t size;
+ union {
+ uint32_t size;
+ uint32_t dwbes;
+ struct {
+ unsigned int rsvd:25;
+ unsigned int p_val_err:1;
+ unsigned int q_val_err:1;
+ unsigned int rsvd1:4;
+ unsigned int wbes:1;
+ } dwbes_f;
+ };
union {
uint32_t ctl;
struct {
@@ -162,9 +190,14 @@ struct ioat_pq_descriptor {
unsigned int hint:1;
unsigned int p_disable:1;
unsigned int q_disable:1;
- unsigned int rsvd:11;
+ unsigned int rsvd2:2;
+ unsigned int wb_en:1;
+ unsigned int prl_en:1;
+ unsigned int rsvd3:7;
#define IOAT_OP_PQ 0x89
#define IOAT_OP_PQ_VAL 0x8a
+ #define IOAT_OP_PQ_16S 0xa0
+ #define IOAT_OP_PQ_VAL_16S 0xa1
unsigned int op:8;
} ctl_f;
};
@@ -172,7 +205,10 @@ struct ioat_pq_descriptor {
uint64_t p_addr;
uint64_t next;
uint64_t src_addr2;
- uint64_t src_addr3;
+ union {
+ uint64_t src_addr3;
+ uint64_t sed_addr;
+ };
uint8_t coef[8];
uint64_t q_addr;
};
@@ -221,4 +257,40 @@ struct ioat_pq_update_descriptor {
struct ioat_raw_descriptor {
uint64_t field[8];
};
+
+struct ioat_pq16a_descriptor {
+ uint8_t coef[8];
+ uint64_t src_addr3;
+ uint64_t src_addr4;
+ uint64_t src_addr5;
+ uint64_t src_addr6;
+ uint64_t src_addr7;
+ uint64_t src_addr8;
+ uint64_t src_addr9;
+};
+
+struct ioat_pq16b_descriptor {
+ uint64_t src_addr10;
+ uint64_t src_addr11;
+ uint64_t src_addr12;
+ uint64_t src_addr13;
+ uint64_t src_addr14;
+ uint64_t src_addr15;
+ uint64_t src_addr16;
+ uint64_t rsvd;
+};
+
+union ioat_sed_pq_descriptor {
+ struct ioat_pq16a_descriptor a;
+ struct ioat_pq16b_descriptor b;
+};
+
+#define SED_SIZE 64
+
+struct ioat_sed_raw_descriptor {
+ uint64_t a[8];
+ uint64_t b[8];
+ uint64_t c[8];
+};
+
#endif
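For orientation, a minimal user-space sanity check (not part of the patch) of the 16-source layout added above: each half of the SED descriptor is one 64-byte slot, matching SED_SIZE. The struct definitions below are local copies mirroring the hw.h fields, used only to illustrate the sizing.

#include <stdint.h>
#include <stdio.h>

/* local copies of the layouts added above, for a sizeof check only */
struct pq16a { uint8_t coef[8]; uint64_t src_addr[7]; };  /* src_addr3..src_addr9   */
struct pq16b { uint64_t src_addr[7]; uint64_t rsvd; };    /* src_addr10..src_addr16 */

int main(void)
{
	/* both halves should print 64, i.e. SED_SIZE */
	printf("pq16a=%zu pq16b=%zu\n", sizeof(struct pq16a), sizeof(struct pq16b));
	return 0;
}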
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 71c7ecd..2c8d560 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -94,6 +94,23 @@ static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
+
+ /* I/OAT v3.3 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
+
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
@@ -190,6 +207,9 @@ static void ioat_remove(struct pci_dev *pdev)
if (!device)
return;
+ if (device->version >= IOAT_VER_3_0)
+ ioat3_dma_remove(device);
+
dev_err(&pdev->dev, "Removing dma and dca services\n");
if (device->dca) {
unregister_dca_provider(device->dca, &pdev->dev);
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 1391798..2f1cfa0 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -79,6 +79,8 @@
#define IOAT_CAP_APIC 0x00000080
#define IOAT_CAP_XOR 0x00000100
#define IOAT_CAP_PQ 0x00000200
+#define IOAT_CAP_DWBES 0x00002000
+#define IOAT_CAP_RAID16SS 0x00020000
#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */
@@ -93,6 +95,8 @@
#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
#define IOAT_CHANCTRL_INT_REARM 0x0001
#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\
+ IOAT_CHANCTRL_ERR_INT_EN |\
+ IOAT_CHANCTRL_ERR_COMPLETION_EN |\
IOAT_CHANCTRL_ANY_ERR_ABORT_EN)
#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 8c61d17..d39c2cd 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1642,7 +1642,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
return dma_async_device_register(&idmac->dma);
}
-static void __exit ipu_idmac_exit(struct ipu *ipu)
+static void ipu_idmac_exit(struct ipu *ipu)
{
int i;
struct idmac *idmac = &ipu->idmac;
@@ -1756,7 +1756,7 @@ err_noirq:
return ret;
}
-static int __exit ipu_remove(struct platform_device *pdev)
+static int ipu_remove(struct platform_device *pdev)
{
struct ipu *ipu = platform_get_drvdata(pdev);
@@ -1781,7 +1781,7 @@ static struct platform_driver ipu_platform_driver = {
.name = "ipu-core",
.owner = THIS_MODULE,
},
- .remove = __exit_p(ipu_remove),
+ .remove = ipu_remove,
};
static int __init ipu_init(void)
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 69d04d2..7aa0864 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -13,43 +13,31 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/rculist.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_dma.h>
static LIST_HEAD(of_dma_list);
-static DEFINE_SPINLOCK(of_dma_lock);
+static DEFINE_MUTEX(of_dma_lock);
/**
- * of_dma_get_controller - Get a DMA controller in DT DMA helpers list
+ * of_dma_find_controller - Get a DMA controller in DT DMA helpers list
* @dma_spec: pointer to DMA specifier as found in the device tree
*
* Finds a DMA controller with matching device node and number for dma cells
- * in a list of registered DMA controllers. If a match is found the use_count
- * variable is increased and a valid pointer to the DMA data stored is retuned.
- * A NULL pointer is returned if no match is found.
+ * in a list of registered DMA controllers. If a match is found a valid pointer
+ * to the DMA data stored is returned. A NULL pointer is returned if no match is
+ * found.
*/
-static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec)
+static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
{
struct of_dma *ofdma;
- spin_lock(&of_dma_lock);
-
- if (list_empty(&of_dma_list)) {
- spin_unlock(&of_dma_lock);
- return NULL;
- }
-
list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
if ((ofdma->of_node == dma_spec->np) &&
- (ofdma->of_dma_nbcells == dma_spec->args_count)) {
- ofdma->use_count++;
- spin_unlock(&of_dma_lock);
+ (ofdma->of_dma_nbcells == dma_spec->args_count))
return ofdma;
- }
-
- spin_unlock(&of_dma_lock);
pr_debug("%s: can't find DMA controller %s\n", __func__,
dma_spec->np->full_name);
@@ -58,22 +46,6 @@ static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec)
}
/**
- * of_dma_put_controller - Decrement use count for a registered DMA controller
- * @of_dma: pointer to DMA controller data
- *
- * Decrements the use_count variable in the DMA data structure. This function
- * should be called only when a valid pointer is returned from
- * of_dma_get_controller() and no further accesses to data referenced by that
- * pointer are needed.
- */
-static void of_dma_put_controller(struct of_dma *ofdma)
-{
- spin_lock(&of_dma_lock);
- ofdma->use_count--;
- spin_unlock(&of_dma_lock);
-}
-
-/**
* of_dma_controller_register - Register a DMA controller to DT DMA helpers
* @np: device node of DMA controller
* @of_dma_xlate: translation function which converts a phandle
@@ -93,6 +65,7 @@ int of_dma_controller_register(struct device_node *np,
{
struct of_dma *ofdma;
int nbcells;
+ const __be32 *prop;
if (!np || !of_dma_xlate) {
pr_err("%s: not enough information provided\n", __func__);
@@ -103,8 +76,11 @@ int of_dma_controller_register(struct device_node *np,
if (!ofdma)
return -ENOMEM;
- nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL));
- if (!nbcells) {
+ prop = of_get_property(np, "#dma-cells", NULL);
+ if (prop)
+ nbcells = be32_to_cpup(prop);
+
+ if (!prop || !nbcells) {
pr_err("%s: #dma-cells property is missing or invalid\n",
__func__);
kfree(ofdma);
@@ -115,12 +91,11 @@ int of_dma_controller_register(struct device_node *np,
ofdma->of_dma_nbcells = nbcells;
ofdma->of_dma_xlate = of_dma_xlate;
ofdma->of_dma_data = data;
- ofdma->use_count = 0;
/* Now queue of_dma controller structure in list */
- spin_lock(&of_dma_lock);
+ mutex_lock(&of_dma_lock);
list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
- spin_unlock(&of_dma_lock);
+ mutex_unlock(&of_dma_lock);
return 0;
}
@@ -132,32 +107,20 @@ EXPORT_SYMBOL_GPL(of_dma_controller_register);
*
* Memory allocated by of_dma_controller_register() is freed here.
*/
-int of_dma_controller_free(struct device_node *np)
+void of_dma_controller_free(struct device_node *np)
{
struct of_dma *ofdma;
- spin_lock(&of_dma_lock);
-
- if (list_empty(&of_dma_list)) {
- spin_unlock(&of_dma_lock);
- return -ENODEV;
- }
+ mutex_lock(&of_dma_lock);
list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
if (ofdma->of_node == np) {
- if (ofdma->use_count) {
- spin_unlock(&of_dma_lock);
- return -EBUSY;
- }
-
list_del(&ofdma->of_dma_controllers);
- spin_unlock(&of_dma_lock);
kfree(ofdma);
- return 0;
+ break;
}
- spin_unlock(&of_dma_lock);
- return -ENODEV;
+ mutex_unlock(&of_dma_lock);
}
EXPORT_SYMBOL_GPL(of_dma_controller_free);
@@ -172,8 +135,8 @@ EXPORT_SYMBOL_GPL(of_dma_controller_free);
* specifiers, matches the name provided. Returns 0 if the name matches and
* a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV.
*/
-static int of_dma_match_channel(struct device_node *np, char *name, int index,
- struct of_phandle_args *dma_spec)
+static int of_dma_match_channel(struct device_node *np, const char *name,
+ int index, struct of_phandle_args *dma_spec)
{
const char *s;
@@ -198,7 +161,7 @@ static int of_dma_match_channel(struct device_node *np, char *name, int index,
* Returns pointer to appropriate dma channel on success or NULL on error.
*/
struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
- char *name)
+ const char *name)
{
struct of_phandle_args dma_spec;
struct of_dma *ofdma;
@@ -220,14 +183,15 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
if (of_dma_match_channel(np, name, i, &dma_spec))
continue;
- ofdma = of_dma_get_controller(&dma_spec);
-
- if (!ofdma)
- continue;
+ mutex_lock(&of_dma_lock);
+ ofdma = of_dma_find_controller(&dma_spec);
- chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
+ if (ofdma)
+ chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
+ else
+ chan = NULL;
- of_dma_put_controller(ofdma);
+ mutex_unlock(&of_dma_lock);
of_node_put(dma_spec.np);
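A hedged usage sketch, not part of the patch: with the const-correct signature change above, a client driver can pass a string literal for the channel name directly. The function name and the "rx" channel name below are made-up placeholders.

#include <linux/dmaengine.h>
#include <linux/of_dma.h>

/* illustrative only: request a DT-described DMA channel by name */
static struct dma_chan *example_get_rx_chan(struct device_node *np)
{
	return of_dma_request_slave_channel(np, "rx");
}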
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 08b43bf..ec3fc4f 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -16,6 +16,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
#include "virt-dma.h"
@@ -67,6 +69,10 @@ static const unsigned es_bytes[] = {
[OMAP_DMA_DATA_TYPE_S32] = 4,
};
+static struct of_dma_filter_info omap_dma_info = {
+ .filter_fn = omap_dma_filter_fn,
+};
+
static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
return container_of(d, struct omap_dmadev, ddev);
@@ -629,8 +635,22 @@ static int omap_dma_probe(struct platform_device *pdev)
pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
rc);
omap_dma_free(od);
- } else {
- platform_set_drvdata(pdev, od);
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, od);
+
+ if (pdev->dev.of_node) {
+ omap_dma_info.dma_cap = od->ddev.cap_mask;
+
+ /* Device-tree DMA controller registration */
+ rc = of_dma_controller_register(pdev->dev.of_node,
+ of_dma_simple_xlate, &omap_dma_info);
+ if (rc) {
+ pr_warn("OMAP-DMA: failed to register DMA controller\n");
+ dma_async_device_unregister(&od->ddev);
+ omap_dma_free(od);
+ }
}
dev_info(&pdev->dev, "OMAP DMA engine driver\n");
@@ -642,18 +662,32 @@ static int omap_dma_remove(struct platform_device *pdev)
{
struct omap_dmadev *od = platform_get_drvdata(pdev);
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
dma_async_device_unregister(&od->ddev);
omap_dma_free(od);
return 0;
}
+static const struct of_device_id omap_dma_match[] = {
+ { .compatible = "ti,omap2420-sdma", },
+ { .compatible = "ti,omap2430-sdma", },
+ { .compatible = "ti,omap3430-sdma", },
+ { .compatible = "ti,omap3630-sdma", },
+ { .compatible = "ti,omap4430-sdma", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_dma_match);
+
static struct platform_driver omap_dma_driver = {
.probe = omap_dma_probe,
.remove = omap_dma_remove,
.driver = {
.name = "omap-dma-engine",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(omap_dma_match),
},
};
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index d01faeb..ce3dc3e 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -476,7 +476,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
if (!ret) {
- ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
+ ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
if (ret) {
spin_lock(&pd_chan->lock);
pd_chan->descs_allocated++;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 5dbc594..a17553f 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -26,6 +26,7 @@
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/err.h>
#include "dmaengine.h"
#define PL330_MAX_CHAN 8
@@ -2288,13 +2289,12 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
/* If already submitted */
if (desc->status == BUSY)
- break;
+ continue;
ret = pl330_submit_req(pch->pl330_chid,
&desc->req);
if (!ret) {
desc->status = BUSY;
- break;
} else if (ret == -EAGAIN) {
/* QFull or DMAC Dying */
break;
@@ -2904,9 +2904,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
- pi->base = devm_request_and_ioremap(&adev->dev, res);
- if (!pi->base)
- return -ENXIO;
+ pi->base = devm_ioremap_resource(&adev->dev, res);
+ if (IS_ERR(pi->base))
+ return PTR_ERR(pi->base);
amba_set_drvdata(adev, pdmac);
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
new file mode 100644
index 0000000..5c1dee2
--- /dev/null
+++ b/drivers/dma/sh/Kconfig
@@ -0,0 +1,24 @@
+#
+# DMA engine configuration for sh
+#
+
+config SH_DMAE_BASE
+ bool "Renesas SuperH DMA Engine support"
+ depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
+ depends on !SH_DMA_API
+ default y
+ select DMA_ENGINE
+ help
+ Enable support for the Renesas SuperH DMA controllers.
+
+config SH_DMAE
+ tristate "Renesas SuperH DMAC support"
+ depends on SH_DMAE_BASE
+ help
+ Enable support for the Renesas SuperH DMA controllers.
+
+config SUDMAC
+ tristate "Renesas SUDMAC support"
+ depends on SH_DMAE_BASE
+ help
+ Enable support for the Renesas SUDMAC controllers.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 54ae957..c07ca46 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_SH_DMAE) += shdma-base.o
+obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o
obj-$(CONFIG_SH_DMAE) += shdma.o
+obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
new file mode 100644
index 0000000..e7c94bb
--- /dev/null
+++ b/drivers/dma/sh/sudmac.c
@@ -0,0 +1,428 @@
+/*
+ * Renesas SUDMAC support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * based on drivers/dma/sh/shdma.c:
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/sudmac.h>
+
+struct sudmac_chan {
+ struct shdma_chan shdma_chan;
+ void __iomem *base;
+ char dev_id[16]; /* unique name for each channel of the DMAC */
+
+ u32 offset; /* for CFG, BA, BBC, CA, CBC, DEN */
+ u32 cfg;
+ u32 dint_end_bit;
+};
+
+struct sudmac_device {
+ struct shdma_dev shdma_dev;
+ struct sudmac_pdata *pdata;
+ void __iomem *chan_reg;
+};
+
+struct sudmac_regs {
+ u32 base_addr;
+ u32 base_byte_count;
+};
+
+struct sudmac_desc {
+ struct sudmac_regs hw;
+ struct shdma_desc shdma_desc;
+};
+
+#define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan)
+#define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc)
+#define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \
+ struct sudmac_device, shdma_dev.dma_dev)
+
+/* SUDMAC register */
+#define SUDMAC_CH0CFG 0x00
+#define SUDMAC_CH0BA 0x10
+#define SUDMAC_CH0BBC 0x18
+#define SUDMAC_CH0CA 0x20
+#define SUDMAC_CH0CBC 0x28
+#define SUDMAC_CH0DEN 0x30
+#define SUDMAC_DSTSCLR 0x38
+#define SUDMAC_DBUFCTRL 0x3C
+#define SUDMAC_DINTCTRL 0x40
+#define SUDMAC_DINTSTS 0x44
+#define SUDMAC_DINTSTSCLR 0x48
+#define SUDMAC_CH0SHCTRL 0x50
+
+/* Definitions for the sudmac_channel.config */
+#define SUDMAC_SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */
+#define SUDMAC_RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */
+#define SUDMAC_LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */
+
+/* Definitions for the sudmac_channel.dint_end_bit */
+#define SUDMAC_CH1ENDE 0x0002 /* b1: Ch1 DMA Transfer End Int Enable */
+#define SUDMAC_CH0ENDE 0x0001 /* b0: Ch0 DMA Transfer End Int Enable */
+
+#define SUDMAC_DRV_NAME "sudmac"
+
+static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg)
+{
+ iowrite32(data, sc->base + reg);
+}
+
+static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg)
+{
+ return ioread32(sc->base + reg);
+}
+
+static bool sudmac_is_busy(struct sudmac_chan *sc)
+{
+ u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset);
+
+ if (den)
+ return true; /* working */
+
+ return false; /* waiting */
+}
+
+static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw,
+ struct shdma_desc *sdesc)
+{
+ sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset);
+ sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset);
+ sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset);
+}
+
+static void sudmac_start(struct sudmac_chan *sc)
+{
+ u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);
+
+ sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL);
+ sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset);
+}
+
+static void sudmac_start_xfer(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ struct sudmac_desc *sd = to_desc(sdesc);
+
+ sudmac_set_reg(sc, &sd->hw, sdesc);
+ sudmac_start(sc);
+}
+
+static bool sudmac_channel_busy(struct shdma_chan *schan)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+
+ return sudmac_is_busy(sc);
+}
+
+static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id)
+{
+}
+
+static const struct sudmac_slave_config *sudmac_find_slave(
+ struct sudmac_chan *sc, int slave_id)
+{
+ struct sudmac_device *sdev = to_sdev(sc);
+ struct sudmac_pdata *pdata = sdev->pdata;
+ const struct sudmac_slave_config *cfg;
+ int i;
+
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->slave_id == slave_id)
+ return cfg;
+
+ return NULL;
+}
+
+static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);
+
+ if (!cfg)
+ return -ENODEV;
+
+ return 0;
+}
+
+static inline void sudmac_dma_halt(struct sudmac_chan *sc)
+{
+ u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);
+
+ sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset);
+ sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL);
+ sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR);
+}
+
+static int sudmac_desc_setup(struct shdma_chan *schan,
+ struct shdma_desc *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ struct sudmac_desc *sd = to_desc(sdesc);
+
+ dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n",
+ __func__, src, dst, *len);
+
+ if (*len > schan->max_xfer_len)
+ *len = schan->max_xfer_len;
+
+ if (dst)
+ sd->hw.base_addr = dst;
+ else if (src)
+ sd->hw.base_addr = src;
+ sd->hw.base_byte_count = *len;
+
+ return 0;
+}
+
+static void sudmac_halt(struct shdma_chan *schan)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+
+ sudmac_dma_halt(sc);
+}
+
+static bool sudmac_chan_irq(struct shdma_chan *schan, int irq)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS);
+
+ if (!(dintsts & sc->dint_end_bit))
+ return false;
+
+ /* DMA stop */
+ sudmac_dma_halt(sc);
+
+ return true;
+}
+
+static size_t sudmac_get_partial(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ struct sudmac_desc *sd = to_desc(sdesc);
+ u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset);
+
+ return sd->hw.base_byte_count - current_byte_count;
+}
+
+static bool sudmac_desc_completed(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ struct sudmac_desc *sd = to_desc(sdesc);
+ u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset);
+
+ return sd->hw.base_addr + sd->hw.base_byte_count == current_addr;
+}
+
+static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq,
+ unsigned long flags)
+{
+ struct shdma_dev *sdev = &su_dev->shdma_dev;
+ struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
+ struct sudmac_chan *sc;
+ struct shdma_chan *schan;
+ int err;
+
+ sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL);
+ if (!sc) {
+ dev_err(sdev->dma_dev.dev,
+ "No free memory for allocating dma channels!\n");
+ return -ENOMEM;
+ }
+
+ schan = &sc->shdma_chan;
+ schan->max_xfer_len = 64 * 1024 * 1024 - 1;
+
+ shdma_chan_probe(sdev, schan, id);
+
+ sc->base = su_dev->chan_reg;
+
+ /* get platform_data */
+ sc->offset = su_dev->pdata->channel->offset;
+ if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE)
+ sc->cfg |= SUDMAC_SENDBUFM;
+ if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE)
+ sc->cfg |= SUDMAC_RCVENDM;
+ sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT;
+
+ if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0)
+ sc->dint_end_bit |= SUDMAC_CH0ENDE;
+ if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1)
+ sc->dint_end_bit |= SUDMAC_CH1ENDE;
+
+ /* set up channel irq */
+ if (pdev->id >= 0)
+ snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d",
+ pdev->id, id);
+ else
+ snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id);
+
+ err = shdma_request_irq(schan, irq, flags, sc->dev_id);
+ if (err) {
+ dev_err(sdev->dma_dev.dev,
+ "DMA channel %d request_irq failed %d\n", id, err);
+ goto err_no_irq;
+ }
+
+ return 0;
+
+err_no_irq:
+ /* remove from dmaengine device node */
+ shdma_chan_remove(schan);
+ return err;
+}
+
+static void sudmac_chan_remove(struct sudmac_device *su_dev)
+{
+ struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
+ struct shdma_chan *schan;
+ int i;
+
+ shdma_for_each_chan(schan, &su_dev->shdma_dev, i) {
+ struct sudmac_chan *sc = to_chan(schan);
+
+ BUG_ON(!schan);
+
+ shdma_free_irq(&sc->shdma_chan);
+ shdma_chan_remove(schan);
+ }
+ dma_dev->chancnt = 0;
+}
+
+static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan)
+{
+ /* SUDMAC doesn't need the address */
+ return 0;
+}
+
+static struct shdma_desc *sudmac_embedded_desc(void *buf, int i)
+{
+ return &((struct sudmac_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops sudmac_shdma_ops = {
+ .desc_completed = sudmac_desc_completed,
+ .halt_channel = sudmac_halt,
+ .channel_busy = sudmac_channel_busy,
+ .slave_addr = sudmac_slave_addr,
+ .desc_setup = sudmac_desc_setup,
+ .set_slave = sudmac_set_slave,
+ .setup_xfer = sudmac_setup_xfer,
+ .start_xfer = sudmac_start_xfer,
+ .embedded_desc = sudmac_embedded_desc,
+ .chan_irq = sudmac_chan_irq,
+ .get_partial = sudmac_get_partial,
+};
+
+static int sudmac_probe(struct platform_device *pdev)
+{
+ struct sudmac_pdata *pdata = pdev->dev.platform_data;
+ int err, i;
+ struct sudmac_device *su_dev;
+ struct dma_device *dma_dev;
+ struct resource *chan, *irq_res;
+
+ /* get platform data */
+ if (!pdata)
+ return -ENODEV;
+
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!chan || !irq_res)
+ return -ENODEV;
+
+ err = -ENOMEM;
+ su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device),
+ GFP_KERNEL);
+ if (!su_dev) {
+ dev_err(&pdev->dev, "Not enough memory\n");
+ return err;
+ }
+
+ dma_dev = &su_dev->shdma_dev.dma_dev;
+
+ su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan);
+ if (!su_dev->chan_reg)
+ return err;
+
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ su_dev->shdma_dev.ops = &sudmac_shdma_ops;
+ su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc);
+ err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num);
+ if (err < 0)
+ return err;
+
+ /* platform data */
+ su_dev->pdata = pdev->dev.platform_data;
+
+ platform_set_drvdata(pdev, su_dev);
+
+ /* Create DMA Channel */
+ for (i = 0; i < pdata->channel_num; i++) {
+ err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED);
+ if (err)
+ goto chan_probe_err;
+ }
+
+ err = dma_async_device_register(&su_dev->shdma_dev.dma_dev);
+ if (err < 0)
+ goto chan_probe_err;
+
+ return err;
+
+chan_probe_err:
+ sudmac_chan_remove(su_dev);
+
+ platform_set_drvdata(pdev, NULL);
+ shdma_cleanup(&su_dev->shdma_dev);
+
+ return err;
+}
+
+static int sudmac_remove(struct platform_device *pdev)
+{
+ struct sudmac_device *su_dev = platform_get_drvdata(pdev);
+ struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+ sudmac_chan_remove(su_dev);
+ shdma_cleanup(&su_dev->shdma_dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sudmac_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = SUDMAC_DRV_NAME,
+ },
+ .probe = sudmac_probe,
+ .remove = sudmac_remove,
+};
+module_platform_driver(sudmac_driver);
+
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_DESCRIPTION("Renesas SUDMAC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" SUDMAC_DRV_NAME);
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 1d627e2..1765a0a 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -16,6 +16,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/clk.h>
#include <linux/sirfsoc_dma.h>
#include "dmaengine.h"
@@ -78,6 +79,7 @@ struct sirfsoc_dma {
struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
void __iomem *base;
int irq;
+ struct clk *clk;
bool is_marco;
};
@@ -639,6 +641,12 @@ static int sirfsoc_dma_probe(struct platform_device *op)
return -EINVAL;
}
+ sdma->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(sdma->clk)) {
+ dev_err(dev, "failed to get a clock.\n");
+ return PTR_ERR(sdma->clk);
+ }
+
ret = of_address_to_resource(dn, 0, &res);
if (ret) {
dev_err(dev, "Error parsing memory region!\n");
@@ -698,6 +706,8 @@ static int sirfsoc_dma_probe(struct platform_device *op)
tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
+ clk_prepare_enable(sdma->clk);
+
/* Register DMA engine */
dev_set_drvdata(dev, sdma);
ret = dma_async_device_register(dma);
@@ -720,6 +730,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
struct device *dev = &op->dev;
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ clk_disable_unprepare(sdma->clk);
dma_async_device_unregister(&sdma->dma);
free_irq(sdma->irq, sdma);
irq_dispose_mapping(sdma->irq);
@@ -742,7 +753,18 @@ static struct platform_driver sirfsoc_dma_driver = {
},
};
-module_platform_driver(sirfsoc_dma_driver);
+static __init int sirfsoc_dma_init(void)
+{
+ return platform_driver_register(&sirfsoc_dma_driver);
+}
+
+static void __exit sirfsoc_dma_exit(void)
+{
+ platform_driver_unregister(&sirfsoc_dma_driver);
+}
+
+subsys_initcall(sirfsoc_dma_init);
+module_exit(sirfsoc_dma_exit);
MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
"Barry Song <baohua.song@csr.com>");
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index fcee27e..ce19340 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -30,6 +30,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/clk/tegra.h>
@@ -199,6 +200,7 @@ struct tegra_dma_channel {
/* Channel-slave specific configuration */
struct dma_slave_config dma_sconfig;
+ struct tegra_dma_channel_regs channel_reg;
};
/* tegra_dma: Tegra DMA specific information */
@@ -1213,7 +1215,6 @@ static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
.support_channel_pause = false,
};
-#if defined(CONFIG_OF)
/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
.nr_channels = 32,
@@ -1243,7 +1244,6 @@ static const struct of_device_id tegra_dma_of_match[] = {
},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
-#endif
static int tegra_dma_probe(struct platform_device *pdev)
{
@@ -1252,20 +1252,14 @@ static int tegra_dma_probe(struct platform_device *pdev)
int ret;
int i;
const struct tegra_dma_chip_data *cdata = NULL;
+ const struct of_device_id *match;
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_device(of_match_ptr(tegra_dma_of_match),
- &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
- cdata = match->data;
- } else {
- /* If no device tree then fallback to tegra20 */
- cdata = &tegra20_dma_chip_data;
+ match = of_match_device(tegra_dma_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
}
+ cdata = match->data;
tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
sizeof(struct tegra_dma_channel), GFP_KERNEL);
@@ -1448,11 +1442,74 @@ static int tegra_dma_runtime_resume(struct device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int tegra_dma_pm_suspend(struct device *dev)
+{
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
+ int i;
+ int ret;
+
+ /* Enable clock before accessing register */
+ ret = tegra_dma_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
+ for (i = 0; i < tdma->chip_data->nr_channels; i++) {
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
+ struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
+
+ ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
+ ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
+ ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
+ ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
+ ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
+ }
+
+ /* Disable clock */
+ tegra_dma_runtime_suspend(dev);
+ return 0;
+}
+
+static int tegra_dma_pm_resume(struct device *dev)
+{
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
+ int i;
+ int ret;
+
+ /* Enable clock before accessing register */
+ ret = tegra_dma_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
+ tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
+ tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
+
+ for (i = 0; i < tdma->chip_data->nr_channels; i++) {
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
+ struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
+
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
+ (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
+ }
+
+ /* Disable clock */
+ tegra_dma_runtime_suspend(dev);
+ return 0;
+}
+#endif
+
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
#ifdef CONFIG_PM_RUNTIME
.runtime_suspend = tegra_dma_runtime_suspend,
.runtime_resume = tegra_dma_runtime_resume,
#endif
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};
static struct platform_driver tegra_dmac_driver = {
@@ -1460,7 +1517,7 @@ static struct platform_driver tegra_dmac_driver = {
.name = "tegra-apbdma",
.owner = THIS_MODULE,
.pm = &tegra_dma_dev_pm_ops,
- .of_match_table = of_match_ptr(tegra_dma_of_match),
+ .of_match_table = tegra_dma_of_match,
},
.probe = tegra_dma_probe,
.remove = tegra_dma_remove,
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 952f823..26107ba 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -823,7 +823,7 @@ static struct platform_driver td_driver = {
.owner = THIS_MODULE,
},
.probe = td_probe,
- .remove = __exit_p(td_remove),
+ .remove = td_remove,
};
module_platform_driver(td_driver);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 913f55c..a59fb48 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1190,7 +1190,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
return 0;
}
-static int __exit txx9dmac_chan_remove(struct platform_device *pdev)
+static int txx9dmac_chan_remove(struct platform_device *pdev)
{
struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
@@ -1252,7 +1252,7 @@ static int __init txx9dmac_probe(struct platform_device *pdev)
return 0;
}
-static int __exit txx9dmac_remove(struct platform_device *pdev)
+static int txx9dmac_remove(struct platform_device *pdev)
{
struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
@@ -1299,14 +1299,14 @@ static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
};
static struct platform_driver txx9dmac_chan_driver = {
- .remove = __exit_p(txx9dmac_chan_remove),
+ .remove = txx9dmac_chan_remove,
.driver = {
.name = "txx9dmac-chan",
},
};
static struct platform_driver txx9dmac_driver = {
- .remove = __exit_p(txx9dmac_remove),
+ .remove = txx9dmac_remove,
.shutdown = txx9dmac_shutdown,
.driver = {
.name = "txx9dmac",
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 5899a76..67610a6 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -87,7 +87,7 @@ static struct device *mci_pdev;
/*
* various constants for Memory Controllers
*/
-static const char *mem_types[] = {
+static const char * const mem_types[] = {
[MEM_EMPTY] = "Empty",
[MEM_RESERVED] = "Reserved",
[MEM_UNKNOWN] = "Unknown",
@@ -107,7 +107,7 @@ static const char *mem_types[] = {
[MEM_RDDR3] = "Registered-DDR3"
};
-static const char *dev_types[] = {
+static const char * const dev_types[] = {
[DEV_UNKNOWN] = "Unknown",
[DEV_X1] = "x1",
[DEV_X2] = "x2",
@@ -118,7 +118,7 @@ static const char *dev_types[] = {
[DEV_X64] = "x64"
};
-static const char *edac_caps[] = {
+static const char * const edac_caps[] = {
[EDAC_UNKNOWN] = "Unknown",
[EDAC_NONE] = "None",
[EDAC_RESERVED] = "Reserved",
@@ -327,17 +327,17 @@ static struct device_attribute *dynamic_csrow_dimm_attr[] = {
};
/* possible dynamic channel ce_count attribute files */
-DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 0);
-DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 1);
-DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 2);
-DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 3);
-DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 4);
-DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 5);
/* Total possible dynamic ce_count attribute file table */
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 5168a13..3297301 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -16,7 +16,7 @@ comment "Extcon Device Drivers"
config EXTCON_GPIO
tristate "GPIO extcon support"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say Y here to enable GPIO based extcon support. Note that GPIO
extcon supports single state per extcon instance.
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 27ac423..7ef316f 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -389,10 +389,8 @@ static void queue_bus_reset_event(struct client *client)
struct bus_reset_event *e;
e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (e == NULL) {
- fw_notice(client->device->card, "out of memory when allocating event\n");
+ if (e == NULL)
return;
- }
fill_bus_reset_event(&e->reset, client);
@@ -693,10 +691,9 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
r = kmalloc(sizeof(*r), GFP_ATOMIC);
e = kmalloc(sizeof(*e), GFP_ATOMIC);
- if (r == NULL || e == NULL) {
- fw_notice(card, "out of memory when allocating event\n");
+ if (r == NULL || e == NULL)
goto failed;
- }
+
r->card = card;
r->request = request;
r->data = payload;
@@ -930,10 +927,9 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
struct iso_interrupt_event *e;
e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
- if (e == NULL) {
- fw_notice(context->card, "out of memory when allocating event\n");
+ if (e == NULL)
return;
- }
+
e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
e->interrupt.closure = client->iso_closure;
e->interrupt.cycle = cycle;
@@ -950,10 +946,9 @@ static void iso_mc_callback(struct fw_iso_context *context,
struct iso_interrupt_mc_event *e;
e = kmalloc(sizeof(*e), GFP_ATOMIC);
- if (e == NULL) {
- fw_notice(context->card, "out of memory when allocating event\n");
+ if (e == NULL)
return;
- }
+
e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
e->interrupt.closure = client->iso_closure;
e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
@@ -1366,8 +1361,7 @@ static int init_iso_resource(struct client *client,
int ret;
if ((request->channels == 0 && request->bandwidth == 0) ||
- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
- request->bandwidth < 0)
+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
return -EINVAL;
r = kmalloc(sizeof(*r), GFP_KERNEL);
@@ -1582,10 +1576,9 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
- if (e == NULL) {
- fw_notice(card, "out of memory when allocating event\n");
+ if (e == NULL)
break;
- }
+
e->phy_packet.closure = client->phy_receiver_closure;
e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
e->phy_packet.rcode = RCODE_COMPLETE;
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 03ce7d9..664a6ff 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -692,10 +692,8 @@ static void create_units(struct fw_device *device)
* match the drivers id_tables against it.
*/
unit = kzalloc(sizeof(*unit), GFP_KERNEL);
- if (unit == NULL) {
- fw_err(device->card, "out of memory for unit\n");
+ if (unit == NULL)
continue;
- }
unit->directory = ci.p + value - 1;
unit->device.bus = &fw_bus_type;
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 4d56536..815b0fc 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -356,10 +356,8 @@ static struct fwnet_fragment_info *fwnet_frag_new(
}
new = kmalloc(sizeof(*new), GFP_ATOMIC);
- if (!new) {
- dev_err(&pd->skb->dev->dev, "out of memory\n");
+ if (!new)
return NULL;
- }
new->offset = offset;
new->len = len;
@@ -402,8 +400,6 @@ fail_w_fi:
fail_w_new:
kfree(new);
fail:
- dev_err(&net->dev, "out of memory\n");
-
return NULL;
}
@@ -609,7 +605,6 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
if (unlikely(!skb)) {
- dev_err(&net->dev, "out of memory\n");
net->stats.rx_dropped++;
return -ENOMEM;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 45912e6..9e1db64 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -54,6 +54,10 @@
#include "core.h"
#include "ohci.h"
+#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
+#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
+#define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
+
#define DESCRIPTOR_OUTPUT_MORE 0
#define DESCRIPTOR_OUTPUT_LAST (1 << 12)
#define DESCRIPTOR_INPUT_MORE (2 << 12)
@@ -68,6 +72,8 @@
#define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
#define DESCRIPTOR_WAIT (3 << 0)
+#define DESCRIPTOR_CMD (0xf << 12)
+
struct descriptor {
__le16 req_count;
__le16 control;
@@ -149,10 +155,11 @@ struct context {
struct descriptor *last;
/*
- * The last descriptor in the DMA program. It contains the branch
+ * The last descriptor block in the DMA program. It contains the branch
* address that must be updated upon appending a new descriptor.
*/
struct descriptor *prev;
+ int prev_z;
descriptor_callback_t callback;
@@ -270,7 +277,9 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2 0x8025
+#define PCI_DEVICE_ID_VIA_VT630X 0x3044
#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
+#define PCI_REV_ID_VIA_VT6306 0x46
#define QUIRK_CYCLE_TIMER 1
#define QUIRK_RESET_PACKET 2
@@ -278,6 +287,8 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_NO_1394A 8
#define QUIRK_NO_MSI 16
#define QUIRK_TI_SLLZ059 32
+#define QUIRK_IR_WAKE 64
+#define QUIRK_PHY_LCTRL_TIMEOUT 128
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
@@ -290,7 +301,10 @@ static const struct {
QUIRK_BE_HEADERS},
{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
- QUIRK_NO_MSI},
+ QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
+
+ {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
+ QUIRK_PHY_LCTRL_TIMEOUT},
{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
QUIRK_RESET_PACKET},
@@ -319,6 +333,9 @@ static const struct {
{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_RESET_PACKET},
+ {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
+ QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
+
{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};
@@ -333,6 +350,8 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
", disable MSI = " __stringify(QUIRK_NO_MSI)
", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
+ ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
+ ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT)
")");
#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -359,8 +378,7 @@ static void log_irqs(struct fw_ohci *ohci, u32 evt)
!(evt & OHCI1394_busReset))
return;
- dev_notice(ohci->card.device,
- "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+ ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
evt & OHCI1394_selfIDComplete ? " selfID" : "",
evt & OHCI1394_RQPkt ? " AR_req" : "",
evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -406,21 +424,19 @@ static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
return;
- dev_notice(ohci->card.device,
- "%d selfIDs, generation %d, local node ID %04x\n",
- self_id_count, generation, ohci->node_id);
+ ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
+ self_id_count, generation, ohci->node_id);
for (s = ohci->self_id_buffer; self_id_count--; ++s)
if ((*s & 1 << 23) == 0)
- dev_notice(ohci->card.device,
- "selfID 0: %08x, phy %d [%c%c%c] "
- "%s gc=%d %s %s%s%s\n",
+ ohci_notice(ohci,
+ "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
*s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
speed[*s >> 14 & 3], *s >> 16 & 63,
power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
*s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
else
- dev_notice(ohci->card.device,
+ ohci_notice(ohci,
"selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
*s, *s >> 24 & 63,
_p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
@@ -470,9 +486,8 @@ static void log_ar_at_event(struct fw_ohci *ohci,
evt = 0x1f;
if (evt == OHCI1394_evt_bus_reset) {
- dev_notice(ohci->card.device,
- "A%c evt_bus_reset, generation %d\n",
- dir, (header[2] >> 16) & 0xff);
+ ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
+ dir, (header[2] >> 16) & 0xff);
return;
}
@@ -491,32 +506,26 @@ static void log_ar_at_event(struct fw_ohci *ohci,
switch (tcode) {
case 0xa:
- dev_notice(ohci->card.device,
- "A%c %s, %s\n",
- dir, evts[evt], tcodes[tcode]);
+ ohci_notice(ohci, "A%c %s, %s\n",
+ dir, evts[evt], tcodes[tcode]);
break;
case 0xe:
- dev_notice(ohci->card.device,
- "A%c %s, PHY %08x %08x\n",
- dir, evts[evt], header[1], header[2]);
+ ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
+ dir, evts[evt], header[1], header[2]);
break;
case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
- dev_notice(ohci->card.device,
- "A%c spd %x tl %02x, "
- "%04x -> %04x, %s, "
- "%s, %04x%08x%s\n",
- dir, speed, header[0] >> 10 & 0x3f,
- header[1] >> 16, header[0] >> 16, evts[evt],
- tcodes[tcode], header[1] & 0xffff, header[2], specific);
+ ohci_notice(ohci,
+ "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n",
+ dir, speed, header[0] >> 10 & 0x3f,
+ header[1] >> 16, header[0] >> 16, evts[evt],
+ tcodes[tcode], header[1] & 0xffff, header[2], specific);
break;
default:
- dev_notice(ohci->card.device,
- "A%c spd %x tl %02x, "
- "%04x -> %04x, %s, "
- "%s%s\n",
- dir, speed, header[0] >> 10 & 0x3f,
- header[1] >> 16, header[0] >> 16, evts[evt],
- tcodes[tcode], specific);
+ ohci_notice(ohci,
+ "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
+ dir, speed, header[0] >> 10 & 0x3f,
+ header[1] >> 16, header[0] >> 16, evts[evt],
+ tcodes[tcode], specific);
}
}
@@ -563,7 +572,8 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr)
if (i >= 3)
msleep(1);
}
- dev_err(ohci->card.device, "failed to read phy reg\n");
+ ohci_err(ohci, "failed to read phy reg %d\n", addr);
+ dump_stack();
return -EBUSY;
}
@@ -585,7 +595,8 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
if (i >= 3)
msleep(1);
}
- dev_err(ohci->card.device, "failed to write phy reg\n");
+ ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
+ dump_stack();
return -EBUSY;
}
@@ -690,8 +701,7 @@ static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
flush_writes(ohci);
- dev_err(ohci->card.device, "AR error: %s; DMA stopped\n",
- error_msg);
+ ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
}
/* FIXME: restart? */
}
@@ -1157,6 +1167,7 @@ static int context_init(struct context *ctx, struct fw_ohci *ohci,
ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
ctx->last = ctx->buffer_tail->buffer;
ctx->prev = ctx->buffer_tail->buffer;
+ ctx->prev_z = 1;
return 0;
}
@@ -1221,14 +1232,35 @@ static void context_append(struct context *ctx,
{
dma_addr_t d_bus;
struct descriptor_buffer *desc = ctx->buffer_tail;
+ struct descriptor *d_branch;
d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
desc->used += (z + extra) * sizeof(*d);
wmb(); /* finish init of new descriptors before branch_address update */
- ctx->prev->branch_address = cpu_to_le32(d_bus | z);
- ctx->prev = find_branch_descriptor(d, z);
+
+ d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
+ d_branch->branch_address = cpu_to_le32(d_bus | z);
+
+ /*
+ * VT6306 incorrectly checks only the single descriptor at the
+ * CommandPtr when the wake bit is written, so if it's a
+ * multi-descriptor block starting with an INPUT_MORE, put a copy of
+ * the branch address in the first descriptor.
+ *
+	 * This is not done for transmit contexts, since it is not clear how it
+	 * would interact with skip addresses.
+ */
+ if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
+ d_branch != ctx->prev &&
+ (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
+ cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
+ ctx->prev->branch_address = cpu_to_le32(d_bus | z);
+ }
+
+ ctx->prev = d;
+ ctx->prev_z = z;
}
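
(Illustration of the quirk handling above; prev, prev_z, new_bus, new_z and the prev_is_input_more_block test are made-up names, not driver code. When a new descriptor block is appended, the branch to it goes into the previous block's branch descriptor as usual, and on QUIRK_IR_WAKE controllers it is mirrored into the previous block's first INPUT_MORE descriptor, since that first descriptor is what sits at the CommandPtr when the wake bit is written.)

	/* prev[0] sits at the CommandPtr; prev[prev_z - 1] normally carries the branch */
	prev[prev_z - 1].branch_address = cpu_to_le32(new_bus | new_z);
	if (quirk_ir_wake && prev_is_input_more_block)
		prev[0].branch_address = cpu_to_le32(new_bus | new_z);
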
static void context_stop(struct context *ctx)
@@ -1248,7 +1280,7 @@ static void context_stop(struct context *ctx)
if (i)
udelay(10);
}
- dev_err(ohci->card.device, "DMA context still active (0x%08x)\n", reg);
+ ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
}
struct driver_data {
@@ -1557,7 +1589,7 @@ static void handle_local_lock(struct fw_ohci *ohci,
goto out;
}
- dev_err(ohci->card.device, "swap not done (CSR lock timeout)\n");
+ ohci_err(ohci, "swap not done (CSR lock timeout)\n");
fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
out:
@@ -1632,8 +1664,7 @@ static void detect_dead_context(struct fw_ohci *ohci,
ctl = reg_read(ohci, CONTROL_SET(regs));
if (ctl & CONTEXT_DEAD)
- dev_err(ohci->card.device,
- "DMA context %s has stopped, error code: %s\n",
+ ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
name, evts[ctl & 0x1f]);
}
@@ -1815,8 +1846,8 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
reg = reg_read(ohci, OHCI1394_NodeID);
if (!(reg & OHCI1394_NodeID_idValid)) {
- dev_notice(ohci->card.device,
- "node ID not valid, new bus reset in progress\n");
+ ohci_notice(ohci,
+ "node ID not valid, new bus reset in progress\n");
return -EBUSY;
}
self_id |= ((reg & 0x3f) << 24); /* phy ID */
@@ -1863,12 +1894,12 @@ static void bus_reset_work(struct work_struct *work)
reg = reg_read(ohci, OHCI1394_NodeID);
if (!(reg & OHCI1394_NodeID_idValid)) {
- dev_notice(ohci->card.device,
- "node ID not valid, new bus reset in progress\n");
+ ohci_notice(ohci,
+ "node ID not valid, new bus reset in progress\n");
return;
}
if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
- dev_notice(ohci->card.device, "malconfigured bus\n");
+ ohci_notice(ohci, "malconfigured bus\n");
return;
}
ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
@@ -1882,7 +1913,7 @@ static void bus_reset_work(struct work_struct *work)
reg = reg_read(ohci, OHCI1394_SelfIDCount);
if (reg & OHCI1394_SelfIDCount_selfIDError) {
- dev_notice(ohci->card.device, "inconsistent self IDs\n");
+ ohci_notice(ohci, "self ID receive error\n");
return;
}
/*
@@ -1894,7 +1925,7 @@ static void bus_reset_work(struct work_struct *work)
self_id_count = (reg >> 3) & 0xff;
if (self_id_count > 252) {
- dev_notice(ohci->card.device, "inconsistent self IDs\n");
+ ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
return;
}
@@ -1902,7 +1933,10 @@ static void bus_reset_work(struct work_struct *work)
rmb();
for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
- if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
+ u32 id = cond_le32_to_cpu(ohci->self_id_cpu[i]);
+ u32 id2 = cond_le32_to_cpu(ohci->self_id_cpu[i + 1]);
+
+ if (id != ~id2) {
/*
* If the invalid data looks like a cycle start packet,
* it's likely to be the result of the cycle master
@@ -1910,33 +1944,30 @@ static void bus_reset_work(struct work_struct *work)
* so far are valid and should be processed so that the
* bus manager can then correct the gap count.
*/
- if (cond_le32_to_cpu(ohci->self_id_cpu[i])
- == 0xffff008f) {
- dev_notice(ohci->card.device,
- "ignoring spurious self IDs\n");
+ if (id == 0xffff008f) {
+ ohci_notice(ohci, "ignoring spurious self IDs\n");
self_id_count = j;
break;
- } else {
- dev_notice(ohci->card.device,
- "inconsistent self IDs\n");
- return;
}
+
+ ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
+ j, self_id_count, id, id2);
+ return;
}
- ohci->self_id_buffer[j] =
- cond_le32_to_cpu(ohci->self_id_cpu[i]);
+ ohci->self_id_buffer[j] = id;
}
if (ohci->quirks & QUIRK_TI_SLLZ059) {
self_id_count = find_and_insert_self_id(ohci, self_id_count);
if (self_id_count < 0) {
- dev_notice(ohci->card.device,
- "could not construct local self ID\n");
+ ohci_notice(ohci,
+ "could not construct local self ID\n");
return;
}
}
if (self_id_count == 0) {
- dev_notice(ohci->card.device, "inconsistent self IDs\n");
+ ohci_notice(ohci, "no self IDs\n");
return;
}
rmb();
@@ -1957,8 +1988,7 @@ static void bus_reset_work(struct work_struct *work)
new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
if (new_generation != generation) {
- dev_notice(ohci->card.device,
- "new bus reset, discarding self ids\n");
+ ohci_notice(ohci, "new bus reset, discarding self ids\n");
return;
}
@@ -2096,7 +2126,7 @@ static irqreturn_t irq_handler(int irq, void *data)
}
if (unlikely(event & OHCI1394_regAccessFail))
- dev_err(ohci->card.device, "register access failure\n");
+ ohci_err(ohci, "register access failure\n");
if (unlikely(event & OHCI1394_postedWriteErr)) {
reg_read(ohci, OHCI1394_PostedWriteAddressHi);
@@ -2104,13 +2134,12 @@ static irqreturn_t irq_handler(int irq, void *data)
reg_write(ohci, OHCI1394_IntEventClear,
OHCI1394_postedWriteErr);
if (printk_ratelimit())
- dev_err(ohci->card.device, "PCI posted write error\n");
+ ohci_err(ohci, "PCI posted write error\n");
}
if (unlikely(event & OHCI1394_cycleTooLong)) {
if (printk_ratelimit())
- dev_notice(ohci->card.device,
- "isochronous cycle too long\n");
+ ohci_notice(ohci, "isochronous cycle too long\n");
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_cycleMaster);
}
@@ -2123,8 +2152,7 @@ static irqreturn_t irq_handler(int irq, void *data)
* them at least two cycles later. (FIXME?)
*/
if (printk_ratelimit())
- dev_notice(ohci->card.device,
- "isochronous cycle inconsistent\n");
+ ohci_notice(ohci, "isochronous cycle inconsistent\n");
}
if (unlikely(event & OHCI1394_unrecoverableError))
@@ -2246,12 +2274,11 @@ static int ohci_enable(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
struct fw_ohci *ohci = fw_ohci(card);
- struct pci_dev *dev = to_pci_dev(card->device);
u32 lps, version, irqs;
int i, ret;
if (software_reset(ohci)) {
- dev_err(card->device, "failed to reset ohci card\n");
+ ohci_err(ohci, "failed to reset ohci card\n");
return -EBUSY;
}
@@ -2262,20 +2289,31 @@ static int ohci_enable(struct fw_card *card,
* will lock up the machine. Wait 50msec to make sure we have
* full link enabled. However, with some cards (well, at least
* a JMicron PCIe card), we have to try again sometimes.
+ *
+ * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
+	 * cannot actually use the phy at that time. These also need a pause of
+	 * tens of milliseconds between the LPS write and the first phy access.
+ *
+ * But do not wait for 50msec on Agere/LSI cards. Their phy
+ * arbitration state machine may time out during such a long wait.
*/
+
reg_write(ohci, OHCI1394_HCControlSet,
OHCI1394_HCControl_LPS |
OHCI1394_HCControl_postedWriteEnable);
flush_writes(ohci);
- for (lps = 0, i = 0; !lps && i < 3; i++) {
+ if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
msleep(50);
+
+ for (lps = 0, i = 0; !lps && i < 150; i++) {
+ msleep(1);
lps = reg_read(ohci, OHCI1394_HCControlSet) &
OHCI1394_HCControl_LPS;
}
if (!lps) {
- dev_err(card->device, "failed to set Link Power Status\n");
+ ohci_err(ohci, "failed to set Link Power Status\n");
return -EIO;
}
@@ -2284,7 +2322,7 @@ static int ohci_enable(struct fw_card *card,
if (ret < 0)
return ret;
if (ret)
- dev_notice(card->device, "local TSB41BA3D phy\n");
+ ohci_notice(ohci, "local TSB41BA3D phy\n");
else
ohci->quirks &= ~QUIRK_TI_SLLZ059;
}
@@ -2382,24 +2420,6 @@ static int ohci_enable(struct fw_card *card,
reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
- if (!(ohci->quirks & QUIRK_NO_MSI))
- pci_enable_msi(dev);
- if (request_irq(dev->irq, irq_handler,
- pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
- ohci_driver_name, ohci)) {
- dev_err(card->device, "failed to allocate interrupt %d\n",
- dev->irq);
- pci_disable_msi(dev);
-
- if (config_rom) {
- dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
- ohci->next_config_rom,
- ohci->next_config_rom_bus);
- ohci->next_config_rom = NULL;
- }
- return -EIO;
- }
-
irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
OHCI1394_RQPkt | OHCI1394_RSPkt |
OHCI1394_isochTx | OHCI1394_isochRx |
@@ -3578,20 +3598,20 @@ static int pci_probe(struct pci_dev *dev,
if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
- dev_err(&dev->dev, "invalid MMIO resource\n");
+ ohci_err(ohci, "invalid MMIO resource\n");
err = -ENXIO;
goto fail_disable;
}
err = pci_request_region(dev, 0, ohci_driver_name);
if (err) {
- dev_err(&dev->dev, "MMIO resource unavailable\n");
+ ohci_err(ohci, "MMIO resource unavailable\n");
goto fail_disable;
}
ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
if (ohci->registers == NULL) {
- dev_err(&dev->dev, "failed to remap registers\n");
+ ohci_err(ohci, "failed to remap registers\n");
err = -ENXIO;
goto fail_iomem;
}
@@ -3675,19 +3695,33 @@ static int pci_probe(struct pci_dev *dev,
guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
reg_read(ohci, OHCI1394_GUIDLo);
+ if (!(ohci->quirks & QUIRK_NO_MSI))
+ pci_enable_msi(dev);
+ if (request_irq(dev->irq, irq_handler,
+ pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
+ ohci_driver_name, ohci)) {
+ ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq);
+ err = -EIO;
+ goto fail_msi;
+ }
+
err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
if (err)
- goto fail_contexts;
+ goto fail_irq;
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
- dev_notice(&dev->dev,
- "added OHCI v%x.%x device as card %d, "
- "%d IR + %d IT contexts, quirks 0x%x\n",
- version >> 16, version & 0xff, ohci->card.index,
- ohci->n_ir, ohci->n_it, ohci->quirks);
+ ohci_notice(ohci,
+ "added OHCI v%x.%x device as card %d, "
+ "%d IR + %d IT contexts, quirks 0x%x\n",
+ version >> 16, version & 0xff, ohci->card.index,
+ ohci->n_ir, ohci->n_it, ohci->quirks);
return 0;
+ fail_irq:
+ free_irq(dev->irq, ohci);
+ fail_msi:
+ pci_disable_msi(dev);
fail_contexts:
kfree(ohci->ir_context_list);
kfree(ohci->it_context_list);
@@ -3711,19 +3745,21 @@ static int pci_probe(struct pci_dev *dev,
kfree(ohci);
pmac_ohci_off(dev);
fail:
- if (err == -ENOMEM)
- dev_err(&dev->dev, "out of memory\n");
-
return err;
}
static void pci_remove(struct pci_dev *dev)
{
- struct fw_ohci *ohci;
+ struct fw_ohci *ohci = pci_get_drvdata(dev);
- ohci = pci_get_drvdata(dev);
- reg_write(ohci, OHCI1394_IntMaskClear, ~0);
- flush_writes(ohci);
+ /*
+ * If the removal is happening from the suspend state, LPS won't be
+	 * enabled and host registers (e.g., IntMaskClear) won't be accessible.
+ */
+ if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
+ reg_write(ohci, OHCI1394_IntMaskClear, ~0);
+ flush_writes(ohci);
+ }
cancel_work_sync(&ohci->bus_reset_work);
fw_core_remove_card(&ohci->card);
@@ -3766,16 +3802,14 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
int err;
software_reset(ohci);
- free_irq(dev->irq, ohci);
- pci_disable_msi(dev);
err = pci_save_state(dev);
if (err) {
- dev_err(&dev->dev, "pci_save_state failed\n");
+ ohci_err(ohci, "pci_save_state failed\n");
return err;
}
err = pci_set_power_state(dev, pci_choose_state(dev, state));
if (err)
- dev_err(&dev->dev, "pci_set_power_state failed with %d\n", err);
+ ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
pmac_ohci_off(dev);
return 0;
@@ -3791,7 +3825,7 @@ static int pci_resume(struct pci_dev *dev)
pci_restore_state(dev);
err = pci_enable_device(dev);
if (err) {
- dev_err(&dev->dev, "pci_enable_device failed\n");
+ ohci_err(ohci, "pci_enable_device failed\n");
return err;
}
@@ -3837,6 +3871,4 @@ MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");
/* Provide a module alias so root-on-sbp2 initrds don't break. */
-#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
-#endif
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 1162d6b..47674b9 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1144,8 +1144,8 @@ static int sbp2_probe(struct device *dev)
return -ENODEV;
if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
- BUG_ON(dma_set_max_seg_size(device->card->device,
- SBP2_MAX_SEG_SIZE));
+ WARN_ON(dma_set_max_seg_size(device->card->device,
+ SBP2_MAX_SEG_SIZE));
shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
if (shost == NULL)
@@ -1475,10 +1475,8 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
}
orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
- if (orb == NULL) {
- dev_notice(lu_dev(lu), "failed to alloc ORB\n");
+ if (orb == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
- }
/* Initialize rcode to something not RCODE_COMPLETE. */
orb->base.rcode = -1;
@@ -1636,9 +1634,7 @@ MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
/* Provide a module alias so root-on-sbp2 initrds don't break. */
-#ifndef CONFIG_IEEE1394_SBP2_MODULE
MODULE_ALIAS("sbp2");
-#endif
static int __init sbp2_init(void)
{
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index c22eed9..87d5670 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -38,7 +38,6 @@ config GPIO_DEVRES
menuconfig GPIOLIB
bool "GPIO Support"
depends on ARCH_WANT_OPTIONAL_GPIOLIB || ARCH_REQUIRE_GPIOLIB
- select GENERIC_GPIO
help
This enables GPIO support through the generic GPIO library.
You only need to enable this, if you also want to enable
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index dda6a75..90a80eb 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -255,7 +255,7 @@ static int __get_gpo_state_p3(struct lpc32xx_gpio_chip *group,
}
/*
- * GENERIC_GPIO primitives.
+ * GPIO primitives.
*/
static int lpc32xx_gpio_dir_input_p012(struct gpio_chip *chip,
unsigned pin)
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6961bbe..264f550 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1685,6 +1685,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
@@ -2341,7 +2342,7 @@ struct hid_device *hid_allocate_device(void)
init_waitqueue_head(&hdev->debug_wait);
INIT_LIST_HEAD(&hdev->debug_list);
- mutex_init(&hdev->debug_list_lock);
+ spin_lock_init(&hdev->debug_list_lock);
sema_init(&hdev->driver_lock, 1);
sema_init(&hdev->driver_input_lock, 1);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 7e56cb3..8453214 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -579,15 +579,16 @@ void hid_debug_event(struct hid_device *hdev, char *buf)
{
int i;
struct hid_debug_list *list;
+ unsigned long flags;
- mutex_lock(&hdev->debug_list_lock);
+ spin_lock_irqsave(&hdev->debug_list_lock, flags);
list_for_each_entry(list, &hdev->debug_list, node) {
for (i = 0; i < strlen(buf); i++)
list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
buf[i];
list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
}
- mutex_unlock(&hdev->debug_list_lock);
+ spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
wake_up_interruptible(&hdev->debug_wait);
}
@@ -977,6 +978,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
{
int err = 0;
struct hid_debug_list *list;
+ unsigned long flags;
if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) {
err = -ENOMEM;
@@ -992,9 +994,9 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
file->private_data = list;
mutex_init(&list->read_mutex);
- mutex_lock(&list->hdev->debug_list_lock);
+ spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
list_add_tail(&list->node, &list->hdev->debug_list);
- mutex_unlock(&list->hdev->debug_list_lock);
+ spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
out:
return err;
@@ -1088,10 +1090,11 @@ static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
static int hid_debug_events_release(struct inode *inode, struct file *file)
{
struct hid_debug_list *list = file->private_data;
+ unsigned long flags;
- mutex_lock(&list->hdev->debug_list_lock);
+ spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
list_del(&list->node);
- mutex_unlock(&list->hdev->debug_list_lock);
+ spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
kfree(list->hid_debug_buf);
kfree(list);
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index 9b0efb0..d164911 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -18,7 +18,8 @@
#include "hid-ids.h"
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+ (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
#define SRWS1_NUMBER_LEDS 15
struct steelseries_srws1_data {
__u16 led_state;
@@ -107,7 +108,8 @@ static __u8 steelseries_srws1_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+ (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds)
{
struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
@@ -370,7 +372,8 @@ MODULE_DEVICE_TABLE(hid, steelseries_srws1_devices);
static struct hid_driver steelseries_srws1_driver = {
.name = "steelseries_srws1",
.id_table = steelseries_srws1_devices,
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+ (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
.probe = steelseries_srws1_probe,
.remove = steelseries_srws1_remove,
#endif
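
(Side note on the guard above: the old "defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)" test also built the LED code when hid-steelseries was built in while the LED class was a module, which cannot link. The kconfig helpers distinguish the cases; a minimal illustration, assuming a hypothetical CONFIG_FOO=m:)

	IS_ENABLED(CONFIG_FOO)	/* 1 - true for =y or =m */
	IS_BUILTIN(CONFIG_FOO)	/* 0 - true only for =y */
	IS_MODULE(CONFIG_FOO)	/* 1 - true only for =m */
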
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index adfee98..631736e 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -363,7 +363,7 @@ config I2C_BLACKFIN_TWI_CLK_KHZ
config I2C_CBUS_GPIO
tristate "CBUS I2C driver"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Support for CBUS access using I2C API. Mostly relevant for Nokia
Internet Tablets (770, N800 and N810).
@@ -436,7 +436,7 @@ config I2C_EG20T
config I2C_GPIO
tristate "GPIO-based bitbanging I2C"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select I2C_ALGOBIT
help
This is a very simple bitbanging I2C driver utilizing the
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 5faf244..f7f9865 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -7,7 +7,7 @@ menu "Multiplexer I2C Chip support"
config I2C_ARB_GPIO_CHALLENGE
tristate "GPIO-based I2C arbitration"
- depends on GENERIC_GPIO && OF
+ depends on GPIOLIB && OF
help
If you say yes to this option, support will be included for an
I2C multimaster arbitration scheme using GPIOs and a challenge &
@@ -19,7 +19,7 @@ config I2C_ARB_GPIO_CHALLENGE
config I2C_MUX_GPIO
tristate "GPIO-based I2C multiplexer"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
If you say yes to this option, support will be included for a
GPIO based I2C multiplexer. This driver provides access to
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 6a195d5..62a2c0e 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -175,7 +175,7 @@ config KEYBOARD_EP93XX
config KEYBOARD_GPIO
tristate "GPIO Buttons"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
This driver implements support for buttons connected
to GPIO pins of various CPUs (and some other chips).
@@ -190,7 +190,7 @@ config KEYBOARD_GPIO
config KEYBOARD_GPIO_POLLED
tristate "Polled GPIO buttons"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select INPUT_POLLDEV
help
This driver implements support for buttons connected
@@ -241,7 +241,7 @@ config KEYBOARD_TCA8418
config KEYBOARD_MATRIX
tristate "GPIO driven matrix keypad support"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select INPUT_MATRIXKMAP
help
Enable support for GPIO driven matrix keypad.
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index af80928..bb698e1 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -214,7 +214,7 @@ config INPUT_APANEL
config INPUT_GP2A
tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
depends on I2C
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
hooked to an I2C bus.
@@ -224,7 +224,7 @@ config INPUT_GP2A
config INPUT_GPIO_TILT_POLLED
tristate "Polled GPIO tilt switch"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select INPUT_POLLDEV
help
This driver implements support for tilt switches connected
@@ -472,7 +472,7 @@ config INPUT_PWM_BEEPER
config INPUT_GPIO_ROTARY_ENCODER
tristate "Rotary encoders connected to GPIO pins"
- depends on GPIOLIB && GENERIC_GPIO
+ depends on GPIOLIB
help
Say Y here to add support for rotary encoders connected to GPIO lines.
Check file:Documentation/input/rotary-encoder.txt for more
@@ -484,7 +484,7 @@ config INPUT_GPIO_ROTARY_ENCODER
config INPUT_RB532_BUTTON
tristate "Mikrotik Routerboard 532 button interface"
depends on MIKROTIK_RB532
- depends on GPIOLIB && GENERIC_GPIO
+ depends on GPIOLIB
select INPUT_POLLDEV
help
Say Y here if you want support for the S1 button built into
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 802bd6a..effa9c5 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -295,7 +295,7 @@ config MOUSE_VSXXXAA
config MOUSE_GPIO
tristate "GPIO mouse"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select INPUT_POLLDEV
help
This driver simulates a mouse on GPIO lines of various CPUs (and some
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index d44806d..ef99229 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -173,7 +173,7 @@ config LEDS_PCA9532_GPIO
config LEDS_GPIO
tristate "LED Support for GPIO connected LEDs"
depends on LEDS_CLASS
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
This option enables support for the LEDs connected to GPIO
outputs. To be useful the particular board must have LEDs
@@ -362,7 +362,7 @@ config LEDS_INTEL_SS4200
config LEDS_LT3593
tristate "LED driver for LT3593 controllers"
depends on LEDS_CLASS
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
This option enables support for LEDs driven by a Linear Technology
LT3593 controller. This controller uses a special one-wire pulse
@@ -431,7 +431,7 @@ config LEDS_ASIC3
config LEDS_RENESAS_TPU
bool "LED support for Renesas TPU"
- depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO
+ depends on LEDS_CLASS=y && HAVE_CLK && GPIOLIB
help
This option enables build of the LED TPU platform driver,
suitable to drive any TPU channel on newer Renesas SoCs.
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index c608313..0387e05 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -319,6 +319,9 @@ static void __cache_size_refresh(void)
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
enum data_mode *data_mode)
{
+ unsigned noio_flag;
+ void *ptr;
+
if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
*data_mode = DATA_MODE_SLAB;
return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
}
*data_mode = DATA_MODE_VMALLOC;
- return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+ /*
+ * __vmalloc allocates the data pages and auxiliary structures with
+ * gfp_flags that were specified, but pagetables are always allocated
+ * with GFP_KERNEL, no matter what was specified as gfp_mask.
+ *
+ * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
+ * all allocations done by this process (including pagetables) are done
+ * as if GFP_NOIO was specified.
+ */
+
+ if (gfp_mask & __GFP_NORETRY)
+ noio_flag = memalloc_noio_save();
+
+ ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+ if (gfp_mask & __GFP_NORETRY)
+ memalloc_noio_restore(noio_flag);
+
+ return ptr;
}
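
(A minimal sketch of the scoped-NOIO pattern the comment above relies on; size and the gfp flags here are illustrative, not the dm-bufio call site. Everything allocated between save and restore, including the page tables set up by __vmalloc, then behaves as if GFP_NOIO had been requested.)

	unsigned noio_flag;
	void *p;

	noio_flag = memalloc_noio_save();
	p = __vmalloc(size, GFP_NOIO | __GFP_HIGHMEM, PAGE_KERNEL);
	memalloc_noio_restore(noio_flag);
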
/*
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 83e995f..1af7255 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1044,7 +1044,7 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
down_read(&cmd->root_lock);
- memcpy(stats, &cmd->stats, sizeof(*stats));
+ *stats = cmd->stats;
up_read(&cmd->root_lock);
}
@@ -1052,7 +1052,7 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
down_write(&cmd->root_lock);
- memcpy(&cmd->stats, stats, sizeof(*stats));
+ cmd->stats = *stats;
up_write(&cmd->root_lock);
}
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 558bdfd..33369ca 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -130,8 +130,8 @@ struct dm_cache_policy {
*
* Must not block.
*
- * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK
- * would be typical).
+ * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
+ * (-EWOULDBLOCK would be typical).
*/
int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1074409..df44b60 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -205,7 +205,7 @@ struct per_bio_data {
/*
* writethrough fields. These MUST remain at the end of this
* structure and the 'cache' member must be the first as it
- * is used to determine the offsetof the writethrough fields.
+ * is used to determine the offset of the writethrough fields.
*/
struct cache *cache;
dm_cblock_t cblock;
@@ -393,7 +393,7 @@ static int get_cell(struct cache *cache,
return r;
}
- /*----------------------------------------------------------------*/
+/*----------------------------------------------------------------*/
static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
@@ -419,6 +419,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
}
/*----------------------------------------------------------------*/
+
static bool block_size_is_power_of_two(struct cache *cache)
{
return cache->sectors_per_block_shift >= 0;
@@ -667,7 +668,7 @@ static void writethrough_endio(struct bio *bio, int err)
/*
* We can't issue this bio directly, since we're in interrupt
- * context. So it get's put on a bio list for processing by the
+ * context. So it gets put on a bio list for processing by the
* worker thread.
*/
defer_writethrough_bio(pb->cache, bio);
@@ -1445,6 +1446,7 @@ static void do_worker(struct work_struct *ws)
static void do_waker(struct work_struct *ws)
{
struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
+ policy_tick(cache->policy);
wake_worker(cache);
queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}
@@ -1809,7 +1811,37 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
static struct kmem_cache *migration_cache;
-static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
+#define NOT_CORE_OPTION 1
+
+static int process_config_option(struct cache *cache, const char *key, const char *value)
+{
+ unsigned long tmp;
+
+ if (!strcasecmp(key, "migration_threshold")) {
+ if (kstrtoul(value, 10, &tmp))
+ return -EINVAL;
+
+ cache->migration_threshold = tmp;
+ return 0;
+ }
+
+ return NOT_CORE_OPTION;
+}
+
+static int set_config_value(struct cache *cache, const char *key, const char *value)
+{
+ int r = process_config_option(cache, key, value);
+
+ if (r == NOT_CORE_OPTION)
+ r = policy_set_config_value(cache->policy, key, value);
+
+ if (r)
+ DMWARN("bad config value for %s: %s", key, value);
+
+ return r;
+}
+
+static int set_config_values(struct cache *cache, int argc, const char **argv)
{
int r = 0;
@@ -1819,12 +1851,9 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
}
while (argc) {
- r = policy_set_config_value(p, argv[0], argv[1]);
- if (r) {
- DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
- argv[0], argv[1]);
- return r;
- }
+ r = set_config_value(cache, argv[0], argv[1]);
+ if (r)
+ break;
argc -= 2;
argv += 2;
@@ -1836,8 +1865,6 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
static int create_cache_policy(struct cache *cache, struct cache_args *ca,
char **error)
{
- int r;
-
cache->policy = dm_cache_policy_create(ca->policy_name,
cache->cache_size,
cache->origin_sectors,
@@ -1847,14 +1874,7 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
return -ENOMEM;
}
- r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
- if (r) {
- *error = "Error setting cache policy's config values";
- dm_cache_policy_destroy(cache->policy);
- cache->policy = NULL;
- }
-
- return r;
+ return 0;
}
/*
@@ -1886,7 +1906,7 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
return discard_block_size;
}
-#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
+#define DEFAULT_MIGRATION_THRESHOLD 2048
static int cache_create(struct cache_args *ca, struct cache **result)
{
@@ -1911,7 +1931,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->discards_supported = true;
ti->discard_zeroes_data_unsupported = true;
- memcpy(&cache->features, &ca->features, sizeof(cache->features));
+ cache->features = ca->features;
ti->per_bio_data_size = get_per_bio_data_size(cache);
cache->callbacks.congested_fn = cache_is_congested;
@@ -1948,7 +1968,15 @@ static int cache_create(struct cache_args *ca, struct cache **result)
r = create_cache_policy(cache, ca, error);
if (r)
goto bad;
+
cache->policy_nr_args = ca->policy_argc;
+ cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
+
+ r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
+ if (r) {
+ *error = "Error setting cache policy's config values";
+ goto bad;
+ }
cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
ca->block_size, may_format,
@@ -1967,10 +1995,10 @@ static int cache_create(struct cache_args *ca, struct cache **result)
INIT_LIST_HEAD(&cache->quiesced_migrations);
INIT_LIST_HEAD(&cache->completed_migrations);
INIT_LIST_HEAD(&cache->need_commit_migrations);
- cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
atomic_set(&cache->nr_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
+ r = -ENOMEM;
cache->nr_dirty = 0;
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
if (!cache->dirty_bitset) {
@@ -2517,23 +2545,6 @@ err:
DMEMIT("Error");
}
-#define NOT_CORE_OPTION 1
-
-static int process_config_option(struct cache *cache, char **argv)
-{
- unsigned long tmp;
-
- if (!strcasecmp(argv[0], "migration_threshold")) {
- if (kstrtoul(argv[1], 10, &tmp))
- return -EINVAL;
-
- cache->migration_threshold = tmp;
- return 0;
- }
-
- return NOT_CORE_OPTION;
-}
-
/*
* Supports <key> <value>.
*
@@ -2541,17 +2552,12 @@ static int process_config_option(struct cache *cache, char **argv)
*/
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
- int r;
struct cache *cache = ti->private;
if (argc != 2)
return -EINVAL;
- r = process_config_option(cache, argv);
- if (r == NOT_CORE_OPTION)
- return policy_set_config_value(cache->policy, argv[0], argv[1]);
-
- return r;
+ return set_config_value(cache, argv[0], argv[1]);
}
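
(Usage sketch, device name hypothetical: a runtime change such as "dmsetup message my_cache 0 migration_threshold 2048" arrives here with argv[0] = "migration_threshold" and argv[1] = "2048", and is routed through set_config_value(), which tries the core option first and otherwise hands the pair to the cache policy.)
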
static int cache_iterate_devices(struct dm_target *ti,
@@ -2609,7 +2615,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 1, 0},
+ .version = {1, 1, 1},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 51bb816..bdf26f5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -907,6 +907,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
+ ti->num_write_same_bios = 1;
return 0;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c0e0702..c434e5a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
if (!s->pending_pool) {
ti->error = "Could not allocate mempool for pending exceptions";
+ r = -ENOMEM;
goto bad_pending_pool;
}
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index ea5e878..d907ca6 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct stripe_c *sc;
- sector_t width;
+ sector_t width, tmp_len;
uint32_t stripes;
uint32_t chunk_size;
int r;
@@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
width = ti->len;
- if (sector_div(width, chunk_size)) {
+ if (sector_div(width, stripes)) {
ti->error = "Target length not divisible by "
- "chunk size";
+ "number of stripes";
return -EINVAL;
}
- if (sector_div(width, stripes)) {
+ tmp_len = width;
+ if (sector_div(tmp_len, chunk_size)) {
ti->error = "Target length not divisible by "
- "number of stripes";
+ "chunk size";
return -EINVAL;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e50dad0..1ff252a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1442,7 +1442,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
return false;
if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
+ ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
return false;
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 00cee02..60bce43 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1645,12 +1645,12 @@ int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
return r;
}
-static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
{
int r;
dm_block_t old_count;
- r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count);
+ r = dm_sm_get_nr_blocks(sm, &old_count);
if (r)
return r;
@@ -1658,11 +1658,11 @@ static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
return 0;
if (new_count < old_count) {
- DMERR("cannot reduce size of data device");
+ DMERR("cannot reduce size of space map");
return -EINVAL;
}
- return dm_sm_extend(pmd->data_sm, new_count - old_count);
+ return dm_sm_extend(sm, new_count - old_count);
}
int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
@@ -1671,7 +1671,19 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
down_write(&pmd->root_lock);
if (!pmd->fail_io)
- r = __resize_data_dev(pmd, new_count);
+ r = __resize_space_map(pmd->data_sm, new_count);
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
+int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+{
+ int r = -EINVAL;
+
+ down_write(&pmd->root_lock);
+ if (!pmd->fail_io)
+ r = __resize_space_map(pmd->metadata_sm, new_count);
up_write(&pmd->root_lock);
return r;
@@ -1684,3 +1696,17 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
dm_bm_set_read_only(pmd->bm);
up_write(&pmd->root_lock);
}
+
+int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+ dm_block_t threshold,
+ dm_sm_threshold_fn fn,
+ void *context)
+{
+ int r;
+
+ down_write(&pmd->root_lock);
+ r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
+ up_write(&pmd->root_lock);
+
+ return r;
+}
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 0cecc37..845ebbe 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -8,6 +8,7 @@
#define DM_THIN_METADATA_H
#include "persistent-data/dm-block-manager.h"
+#include "persistent-data/dm-space-map.h"
#define THIN_METADATA_BLOCK_SIZE 4096
@@ -185,6 +186,7 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
* blocks would be lost.
*/
int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
+int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
/*
* Flicks the underlying block manager into read only mode, so you know
@@ -192,6 +194,11 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
*/
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
+int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+ dm_block_t threshold,
+ dm_sm_threshold_fn fn,
+ void *context);
+
/*----------------------------------------------------------------*/
#endif
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 004ad165..759cffc 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -922,7 +922,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
return r;
if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
- DMWARN("%s: reached low water mark, sending event.",
+ DMWARN("%s: reached low water mark for data device: sending event.",
dm_device_name(pool->pool_md));
spin_lock_irqsave(&pool->lock, flags);
pool->low_water_triggered = 1;
@@ -1281,6 +1281,10 @@ static void process_bio_fail(struct thin_c *tc, struct bio *bio)
bio_io_error(bio);
}
+/*
+ * FIXME: should we also commit due to size of transaction, measured in
+ * metadata blocks?
+ */
static int need_commit_due_to_time(struct pool *pool)
{
return jiffies < pool->last_commit_jiffies ||
@@ -1909,6 +1913,56 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
return r;
}
+static void metadata_low_callback(void *context)
+{
+ struct pool *pool = context;
+
+ DMWARN("%s: reached low water mark for metadata device: sending event.",
+ dm_device_name(pool->pool_md));
+
+ dm_table_event(pool->ti->table);
+}
+
+static sector_t get_metadata_dev_size(struct block_device *bdev)
+{
+ sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ char buffer[BDEVNAME_SIZE];
+
+ if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
+ DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
+ bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
+ metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
+ }
+
+ return metadata_dev_size;
+}
+
+static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
+{
+ sector_t metadata_dev_size = get_metadata_dev_size(bdev);
+
+ sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+
+ return metadata_dev_size;
+}
+
+/*
+ * When a metadata threshold is crossed a dm event is triggered, and
+ * userland should respond by growing the metadata device. We could let
+ * userland set the threshold, like we do with the data threshold, but it
+ * is not clear userland knows enough to choose it well.
+ */
+static dm_block_t calc_metadata_threshold(struct pool_c *pt)
+{
+ /*
+ * 4M is ample for all ops with the possible exception of thin
+ * device deletion which is harmless if it fails (just retry the
+ * delete after you've grown the device).
+ */
+ dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
+ return min((dm_block_t)1024ULL /* 4M */, quarter);
+}
+
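
(Worked example for the clamp above, assuming a 1 GiB metadata device: 1 GiB is 2097152 sectors, i.e. 262144 blocks of 4 KiB; a quarter of that is 65536 blocks, so the min() yields the 1024-block (4 MiB) cap. Only metadata devices smaller than 16 MiB end up with a threshold below 4 MiB.)
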
/*
* thin-pool <metadata dev> <data dev>
* <data block size (sectors)>
@@ -1931,8 +1985,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
unsigned long block_size;
dm_block_t low_water_blocks;
struct dm_dev *metadata_dev;
- sector_t metadata_dev_size;
- char b[BDEVNAME_SIZE];
+ fmode_t metadata_mode;
/*
* FIXME Remove validation from scope of lock.
@@ -1944,19 +1997,32 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = -EINVAL;
goto out_unlock;
}
+
as.argc = argc;
as.argv = argv;
- r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
+ /*
+ * Set default pool features.
+ */
+ pool_features_init(&pf);
+
+ dm_consume_args(&as, 4);
+ r = parse_pool_features(&as, &pf, ti);
+ if (r)
+ goto out_unlock;
+
+ metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
+ r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
if (r) {
ti->error = "Error opening metadata block device";
goto out_unlock;
}
- metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
- if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
- DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
- bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
+ /*
+ * Run for the side-effect of possibly issuing a warning if the
+ * device is too big.
+ */
+ (void) get_metadata_dev_size(metadata_dev->bdev);
r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
if (r) {
@@ -1979,16 +2045,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto out;
}
- /*
- * Set default pool features.
- */
- pool_features_init(&pf);
-
- dm_consume_args(&as, 4);
- r = parse_pool_features(&as, &pf, ti);
- if (r)
- goto out;
-
pt = kzalloc(sizeof(*pt), GFP_KERNEL);
if (!pt) {
r = -ENOMEM;
@@ -2040,6 +2096,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
ti->private = pt;
+ r = dm_pool_register_metadata_threshold(pt->pool->pmd,
+ calc_metadata_threshold(pt),
+ metadata_low_callback,
+ pool);
+ if (r)
+ goto out_free_pt;
+
pt->callbacks.congested_fn = pool_is_congested;
dm_table_add_target_callbacks(ti->table, &pt->callbacks);
@@ -2079,18 +2142,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
return r;
}
-/*
- * Retrieves the number of blocks of the data device from
- * the superblock and compares it to the actual device size,
- * thus resizing the data device in case it has grown.
- *
- * This both copes with opening preallocated data devices in the ctr
- * being followed by a resume
- * -and-
- * calling the resume method individually after userspace has
- * grown the data device in reaction to a table event.
- */
-static int pool_preresume(struct dm_target *ti)
+static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
{
int r;
struct pool_c *pt = ti->private;
@@ -2098,12 +2150,7 @@ static int pool_preresume(struct dm_target *ti)
sector_t data_size = ti->len;
dm_block_t sb_data_size;
- /*
- * Take control of the pool object.
- */
- r = bind_control_target(pool, ti);
- if (r)
- return r;
+ *need_commit = false;
(void) sector_div(data_size, pool->sectors_per_block);
@@ -2114,7 +2161,7 @@ static int pool_preresume(struct dm_target *ti)
}
if (data_size < sb_data_size) {
- DMERR("pool target too small, is %llu blocks (expected %llu)",
+ DMERR("pool target (%llu blocks) too small: expected %llu",
(unsigned long long)data_size, sb_data_size);
return -EINVAL;
@@ -2122,17 +2169,90 @@ static int pool_preresume(struct dm_target *ti)
r = dm_pool_resize_data_dev(pool->pmd, data_size);
if (r) {
DMERR("failed to resize data device");
- /* FIXME Stricter than necessary: Rollback transaction instead here */
set_pool_mode(pool, PM_READ_ONLY);
return r;
}
- (void) commit_or_fallback(pool);
+ *need_commit = true;
}
return 0;
}
+static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
+{
+ int r;
+ struct pool_c *pt = ti->private;
+ struct pool *pool = pt->pool;
+ dm_block_t metadata_dev_size, sb_metadata_dev_size;
+
+ *need_commit = false;
+
+ metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
+
+ r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
+ if (r) {
+ DMERR("failed to retrieve data device size");
+ return r;
+ }
+
+ if (metadata_dev_size < sb_metadata_dev_size) {
+ DMERR("metadata device (%llu sectors) too small: expected %llu",
+ metadata_dev_size, sb_metadata_dev_size);
+ return -EINVAL;
+
+ } else if (metadata_dev_size > sb_metadata_dev_size) {
+ r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
+ if (r) {
+ DMERR("failed to resize metadata device");
+ return r;
+ }
+
+ *need_commit = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Retrieves the number of blocks of the data device from
+ * the superblock and compares it to the actual device size,
+ * thus resizing the data device in case it has grown.
+ *
+ * This both copes with opening preallocated data devices in the ctr
+ * being followed by a resume
+ * -and-
+ * calling the resume method individually after userspace has
+ * grown the data device in reaction to a table event.
+ */
+static int pool_preresume(struct dm_target *ti)
+{
+ int r;
+ bool need_commit1, need_commit2;
+ struct pool_c *pt = ti->private;
+ struct pool *pool = pt->pool;
+
+ /*
+ * Take control of the pool object.
+ */
+ r = bind_control_target(pool, ti);
+ if (r)
+ return r;
+
+ r = maybe_resize_data_dev(ti, &need_commit1);
+ if (r)
+ return r;
+
+ r = maybe_resize_metadata_dev(ti, &need_commit2);
+ if (r)
+ return r;
+
+ if (need_commit1 || need_commit2)
+ (void) commit_or_fallback(pool);
+
+ return 0;
+}
+
static void pool_resume(struct dm_target *ti)
{
struct pool_c *pt = ti->private;
@@ -2549,7 +2669,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 7, 0},
+ .version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index f6d29e6..e735a6d 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -248,7 +248,8 @@ static struct dm_space_map ops = {
.new_block = sm_disk_new_block,
.commit = sm_disk_commit,
.root_size = sm_disk_root_size,
- .copy_root = sm_disk_copy_root
+ .copy_root = sm_disk_copy_root,
+ .register_threshold_callback = NULL
};
struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 906cf3d..1c95968 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -17,6 +17,55 @@
/*----------------------------------------------------------------*/
/*
+ * An edge triggered threshold.
+ */
+struct threshold {
+ bool threshold_set;
+ bool value_set;
+ dm_block_t threshold;
+ dm_block_t current_value;
+ dm_sm_threshold_fn fn;
+ void *context;
+};
+
+static void threshold_init(struct threshold *t)
+{
+ t->threshold_set = false;
+ t->value_set = false;
+}
+
+static void set_threshold(struct threshold *t, dm_block_t value,
+ dm_sm_threshold_fn fn, void *context)
+{
+ t->threshold_set = true;
+ t->threshold = value;
+ t->fn = fn;
+ t->context = context;
+}
+
+static bool below_threshold(struct threshold *t, dm_block_t value)
+{
+ return t->threshold_set && value <= t->threshold;
+}
+
+static bool threshold_already_triggered(struct threshold *t)
+{
+ return t->value_set && below_threshold(t, t->current_value);
+}
+
+static void check_threshold(struct threshold *t, dm_block_t value)
+{
+ if (below_threshold(t, value) &&
+ !threshold_already_triggered(t))
+ t->fn(t->context);
+
+ t->value_set = true;
+ t->current_value = value;
+}
+
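
(A short usage sketch of the edge-triggered behaviour above; the threshold value, callback and context names are hypothetical.)

	struct threshold t;

	threshold_init(&t);
	set_threshold(&t, 100, low_space_fn, ctx);

	check_threshold(&t, 150);	/* above the threshold: callback not run */
	check_threshold(&t, 90);	/* first value at or below 100: low_space_fn(ctx) runs */
	check_threshold(&t, 80);	/* still below: not run again until the value rises above */
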
+/*----------------------------------------------------------------*/
+
+/*
* Space map interface.
*
* The low level disk format is written using the standard btree and
@@ -54,6 +103,8 @@ struct sm_metadata {
unsigned allocated_this_transaction;
unsigned nr_uncommitted;
struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
+
+ struct threshold threshold;
};
static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
@@ -144,12 +195,6 @@ static void sm_metadata_destroy(struct dm_space_map *sm)
kfree(smm);
}
-static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
-{
- DMERR("doesn't support extend");
- return -EINVAL;
-}
-
static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
@@ -335,9 +380,19 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
{
+ dm_block_t count;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
int r = sm_metadata_new_block_(sm, b);
if (r)
DMERR("unable to allocate new metadata block");
+
+ r = sm_metadata_get_nr_free(sm, &count);
+ if (r)
+ DMERR("couldn't get free block count");
+
+ check_threshold(&smm->threshold, count);
+
return r;
}
@@ -357,6 +412,18 @@ static int sm_metadata_commit(struct dm_space_map *sm)
return 0;
}
+static int sm_metadata_register_threshold_callback(struct dm_space_map *sm,
+ dm_block_t threshold,
+ dm_sm_threshold_fn fn,
+ void *context)
+{
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+ set_threshold(&smm->threshold, threshold, fn, context);
+
+ return 0;
+}
+
static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
{
*result = sizeof(struct disk_sm_root);
@@ -382,6 +449,8 @@ static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t
return 0;
}
+static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);
+
static struct dm_space_map ops = {
.destroy = sm_metadata_destroy,
.extend = sm_metadata_extend,
@@ -395,7 +464,8 @@ static struct dm_space_map ops = {
.new_block = sm_metadata_new_block,
.commit = sm_metadata_commit,
.root_size = sm_metadata_root_size,
- .copy_root = sm_metadata_copy_root
+ .copy_root = sm_metadata_copy_root,
+ .register_threshold_callback = sm_metadata_register_threshold_callback
};
/*----------------------------------------------------------------*/
@@ -410,7 +480,7 @@ static void sm_bootstrap_destroy(struct dm_space_map *sm)
static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
- DMERR("boostrap doesn't support extend");
+ DMERR("bootstrap doesn't support extend");
return -EINVAL;
}
@@ -450,7 +520,7 @@ static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
uint32_t count)
{
- DMERR("boostrap doesn't support set_count");
+ DMERR("bootstrap doesn't support set_count");
return -EINVAL;
}
@@ -491,7 +561,7 @@ static int sm_bootstrap_commit(struct dm_space_map *sm)
static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
{
- DMERR("boostrap doesn't support root_size");
+ DMERR("bootstrap doesn't support root_size");
return -EINVAL;
}
@@ -499,7 +569,7 @@ static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
size_t max)
{
- DMERR("boostrap doesn't support copy_root");
+ DMERR("bootstrap doesn't support copy_root");
return -EINVAL;
}
@@ -517,11 +587,42 @@ static struct dm_space_map bootstrap_ops = {
.new_block = sm_bootstrap_new_block,
.commit = sm_bootstrap_commit,
.root_size = sm_bootstrap_root_size,
- .copy_root = sm_bootstrap_copy_root
+ .copy_root = sm_bootstrap_copy_root,
+ .register_threshold_callback = NULL
};
/*----------------------------------------------------------------*/
+static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+ int r, i;
+ enum allocation_event ev;
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ dm_block_t old_len = smm->ll.nr_blocks;
+
+ /*
+ * Flick into a mode where all blocks get allocated in the new area.
+ */
+ smm->begin = old_len;
+ memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+
+ /*
+ * Extend.
+ */
+ r = sm_ll_extend(&smm->ll, extra_blocks);
+
+ /*
+ * Switch back to normal behaviour.
+ */
+ memcpy(&smm->sm, &ops, sizeof(smm->sm));
+ for (i = old_len; !r && i < smm->begin; i++)
+ r = sm_ll_inc(&smm->ll, i, &ev);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
struct dm_space_map *dm_sm_metadata_init(void)
{
struct sm_metadata *smm;
@@ -549,6 +650,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
smm->recursion_count = 0;
smm->allocated_this_transaction = 0;
smm->nr_uncommitted = 0;
+ threshold_init(&smm->threshold);
memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
@@ -590,6 +692,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
smm->recursion_count = 0;
smm->allocated_this_transaction = 0;
smm->nr_uncommitted = 0;
+ threshold_init(&smm->threshold);
memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
return 0;
diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
index 1cbfc6b..3e6d115 100644
--- a/drivers/md/persistent-data/dm-space-map.h
+++ b/drivers/md/persistent-data/dm-space-map.h
@@ -9,6 +9,8 @@
#include "dm-block-manager.h"
+typedef void (*dm_sm_threshold_fn)(void *context);
+
/*
* struct dm_space_map keeps a record of how many times each block in a device
* is referenced. It needs to be fixed on disk as part of the transaction.
@@ -59,6 +61,15 @@ struct dm_space_map {
*/
int (*root_size)(struct dm_space_map *sm, size_t *result);
int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
+
+ /*
+ * You can register one threshold callback which is edge-triggered
+ * when the free space in the space map drops below the threshold.
+ */
+ int (*register_threshold_callback)(struct dm_space_map *sm,
+ dm_block_t threshold,
+ dm_sm_threshold_fn fn,
+ void *context);
};
/*----------------------------------------------------------------*/
@@ -131,4 +142,16 @@ static inline int dm_sm_copy_root(struct dm_space_map *sm, void *copy_to_here_le
return sm->copy_root(sm, copy_to_here_le, len);
}
+static inline int dm_sm_register_threshold_callback(struct dm_space_map *sm,
+ dm_block_t threshold,
+ dm_sm_threshold_fn fn,
+ void *context)
+{
+ if (sm->register_threshold_callback)
+ return sm->register_threshold_callback(sm, threshold, fn, context);
+
+ return -EINVAL;
+}
+
+
#endif /* _LINUX_DM_SPACE_MAP_H */
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 557bec5..5fab4e6e 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -157,19 +157,6 @@ config MTD_BCM47XX_PARTS
comment "User Modules And Translation Layers"
-config MTD_CHAR
- tristate "Direct char device access to MTD devices"
- help
- This provides a character device for each MTD device present in
- the system, allowing the user to read and write directly to the
- memory chips, and also use ioctl() to obtain information about
- the device, or to erase parts of it.
-
-config HAVE_MTD_OTP
- bool
- help
- Enable access to OTP regions using MTD_CHAR.
-
config MTD_BLKDEVS
tristate "Common interface to block layer for MTD 'translation layers'"
depends on BLOCK
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 18a38e5..4cfb31e 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -4,7 +4,7 @@
# Core functionality.
obj-$(CONFIG_MTD) += mtd.o
-mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
+mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
@@ -15,7 +15,6 @@ obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
# 'Users' - code which presents functionality to userspace.
-obj-$(CONFIG_MTD_CHAR) += mtdchar.o
obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o
obj-$(CONFIG_MTD_BLOCK) += mtdblock.o
obj-$(CONFIG_MTD_BLOCK_RO) += mtdblock_ro.o
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index c219e3d..e4696b3 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -146,7 +146,6 @@ config MTD_CFI_I8
config MTD_OTP
bool "Protection Registers aka one-time programmable (OTP) bits"
depends on MTD_CFI_ADV_OPTIONS
- select HAVE_MTD_OTP
default n
help
This enables support for reading, writing and locking so called
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 12311f5..2a4d55e 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -71,7 +71,6 @@ config MTD_DATAFLASH_WRITE_VERIFY
config MTD_DATAFLASH_OTP
bool "DataFlash OTP support (Security Register)"
depends on MTD_DATAFLASH
- select HAVE_MTD_OTP
help
Newer DataFlash chips (revisions C and D) support 128 bytes of
one-time-programmable (OTP) data. The first half may be written
@@ -205,69 +204,6 @@ config MTD_BLOCK2MTD
comment "Disk-On-Chip Device Drivers"
-config MTD_DOC2000
- tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
- depends on MTD_NAND
- select MTD_DOCPROBE
- select MTD_NAND_IDS
- ---help---
- This provides an MTD device driver for the M-Systems DiskOnChip
- 2000 and Millennium devices. Originally designed for the DiskOnChip
- 2000, it also now includes support for the DiskOnChip Millennium.
- If you have problems with this driver and the DiskOnChip Millennium,
- you may wish to try the alternative Millennium driver below. To use
- the alternative driver, you will need to undefine DOC_SINGLE_DRIVER
- in the <file:drivers/mtd/devices/docprobe.c> source code.
-
- If you use this device, you probably also want to enable the NFTL
- 'NAND Flash Translation Layer' option below, which is used to
- emulate a block device by using a kind of file system on the flash
- chips.
-
- NOTE: This driver is deprecated and will probably be removed soon.
- Please try the new DiskOnChip driver under "NAND Flash Device
- Drivers".
-
-config MTD_DOC2001
- tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
- depends on MTD_NAND
- select MTD_DOCPROBE
- select MTD_NAND_IDS
- ---help---
- This provides an alternative MTD device driver for the M-Systems
- DiskOnChip Millennium devices. Use this if you have problems with
- the combined DiskOnChip 2000 and Millennium driver above. To get
- the DiskOnChip probe code to load and use this driver instead of
- the other one, you will need to undefine DOC_SINGLE_DRIVER near
- the beginning of <file:drivers/mtd/devices/docprobe.c>.
-
- If you use this device, you probably also want to enable the NFTL
- 'NAND Flash Translation Layer' option below, which is used to
- emulate a block device by using a kind of file system on the flash
- chips.
-
- NOTE: This driver is deprecated and will probably be removed soon.
- Please try the new DiskOnChip driver under "NAND Flash Device
- Drivers".
-
-config MTD_DOC2001PLUS
- tristate "M-Systems Disk-On-Chip Millennium Plus"
- depends on MTD_NAND
- select MTD_DOCPROBE
- select MTD_NAND_IDS
- ---help---
- This provides an MTD device driver for the M-Systems DiskOnChip
- Millennium Plus devices.
-
- If you use this device, you probably also want to enable the INFTL
- 'Inverse NAND Flash Translation Layer' option below, which is used
- to emulate a block device by using a kind of file system on the
- flash chips.
-
- NOTE: This driver will soon be replaced by the new DiskOnChip driver
- under "NAND Flash Device Drivers" (currently that driver does not
- support all Millennium Plus devices).
-
config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
select BCH
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 369a194..d83bd73 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -2,12 +2,7 @@
# linux/drivers/mtd/devices/Makefile
#
-obj-$(CONFIG_MTD_DOC2000) += doc2000.o
-obj-$(CONFIG_MTD_DOC2001) += doc2001.o
-obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o
obj-$(CONFIG_MTD_DOCG3) += docg3.o
-obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o
-obj-$(CONFIG_MTD_DOCECC) += docecc.o
obj-$(CONFIG_MTD_SLRAM) += slram.o
obj-$(CONFIG_MTD_PHRAM) += phram.o
obj-$(CONFIG_MTD_PMC551) += pmc551.o
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 9526628..18e7761 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -10,7 +10,7 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
-static const char *probes[] = { "bcm47xxpart", NULL };
+static const char * const probes[] = { "bcm47xxpart", NULL };
static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
@@ -61,6 +61,17 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
}
sflash->priv = b47s;
+ b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
+
+ switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) {
+ case BCMA_CC_FLASHT_STSER:
+ b47s->type = BCM47XXSFLASH_TYPE_ST;
+ break;
+ case BCMA_CC_FLASHT_ATSER:
+ b47s->type = BCM47XXSFLASH_TYPE_ATMEL;
+ break;
+ }
+
b47s->window = sflash->window;
b47s->blocksize = sflash->blocksize;
b47s->numblocks = sflash->numblocks;
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
index ebf6f71..f22f8c4 100644
--- a/drivers/mtd/devices/bcm47xxsflash.h
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -3,7 +3,66 @@
#include <linux/mtd/mtd.h>
+/* Used for ST flashes only. */
+#define OPCODE_ST_WREN 0x0006 /* Write Enable */
+#define OPCODE_ST_WRDIS 0x0004 /* Write Disable */
+#define OPCODE_ST_RDSR 0x0105 /* Read Status Register */
+#define OPCODE_ST_WRSR 0x0101 /* Write Status Register */
+#define OPCODE_ST_READ 0x0303 /* Read Data Bytes */
+#define OPCODE_ST_PP 0x0302 /* Page Program */
+#define OPCODE_ST_SE 0x02d8 /* Sector Erase */
+#define OPCODE_ST_BE 0x00c7 /* Bulk Erase */
+#define OPCODE_ST_DP 0x00b9 /* Deep Power-down */
+#define OPCODE_ST_RES 0x03ab /* Read Electronic Signature */
+#define OPCODE_ST_CSA 0x1000 /* Keep chip select asserted */
+#define OPCODE_ST_SSE 0x0220 /* Sub-sector Erase */
+
+/* Used for Atmel flashes only. */
+#define OPCODE_AT_READ 0x07e8
+#define OPCODE_AT_PAGE_READ 0x07d2
+#define OPCODE_AT_STATUS 0x01d7
+#define OPCODE_AT_BUF1_WRITE 0x0384
+#define OPCODE_AT_BUF2_WRITE 0x0387
+#define OPCODE_AT_BUF1_ERASE_PROGRAM 0x0283
+#define OPCODE_AT_BUF2_ERASE_PROGRAM 0x0286
+#define OPCODE_AT_BUF1_PROGRAM 0x0288
+#define OPCODE_AT_BUF2_PROGRAM 0x0289
+#define OPCODE_AT_PAGE_ERASE 0x0281
+#define OPCODE_AT_BLOCK_ERASE 0x0250
+#define OPCODE_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382
+#define OPCODE_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385
+#define OPCODE_AT_BUF1_LOAD 0x0253
+#define OPCODE_AT_BUF2_LOAD 0x0255
+#define OPCODE_AT_BUF1_COMPARE 0x0260
+#define OPCODE_AT_BUF2_COMPARE 0x0261
+#define OPCODE_AT_BUF1_REPROGRAM 0x0258
+#define OPCODE_AT_BUF2_REPROGRAM 0x0259
+
+/* Status register bits for ST flashes */
+#define SR_ST_WIP 0x01 /* Write In Progress */
+#define SR_ST_WEL 0x02 /* Write Enable Latch */
+#define SR_ST_BP_MASK 0x1c /* Block Protect */
+#define SR_ST_BP_SHIFT 2
+#define SR_ST_SRWD 0x80 /* Status Register Write Disable */
+
+/* Status register bits for Atmel flashes */
+#define SR_AT_READY 0x80
+#define SR_AT_MISMATCH 0x40
+#define SR_AT_ID_MASK 0x38
+#define SR_AT_ID_SHIFT 3
+
+struct bcma_drv_cc;
+
+enum bcm47xxsflash_type {
+ BCM47XXSFLASH_TYPE_ATMEL,
+ BCM47XXSFLASH_TYPE_ST,
+};
+
struct bcm47xxsflash {
+ struct bcma_drv_cc *bcma_cc;
+
+ enum bcm47xxsflash_type type;
+
u32 window;
u32 blocksize;
u16 numblocks;
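
With the flash type recorded at probe time and the ST/Atmel opcode tables above, later read/erase paths can dispatch on b47s->type. The patch itself stops at storing the type; the helper below is only a hypothetical sketch of such a dispatch, not code from this series:

	/* Illustrative only: pick the serial-read opcode for the detected chip. */
	static u32 bcm47xxsflash_read_opcode(struct bcm47xxsflash *b47s)
	{
		switch (b47s->type) {
		case BCM47XXSFLASH_TYPE_ST:
			return OPCODE_ST_READ;		/* ST "Read Data Bytes" */
		case BCM47XXSFLASH_TYPE_ATMEL:
			return OPCODE_AT_READ;		/* Atmel continuous array read */
		}
		return 0;
	}
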
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
deleted file mode 100644
index a4eb8b5..0000000
--- a/drivers/mtd/devices/doc2000.c
+++ /dev/null
@@ -1,1178 +0,0 @@
-
-/*
- * Linux driver for Disk-On-Chip 2000 and Millennium
- * (c) 1999 Machine Vision Holdings, Inc.
- * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-#define DOC_SUPPORT_2000
-#define DOC_SUPPORT_2000TSOP
-#define DOC_SUPPORT_MILLENNIUM
-
-#ifdef DOC_SUPPORT_2000
-#define DoC_is_2000(doc) (doc->ChipID == DOC_ChipID_Doc2k)
-#else
-#define DoC_is_2000(doc) (0)
-#endif
-
-#if defined(DOC_SUPPORT_2000TSOP) || defined(DOC_SUPPORT_MILLENNIUM)
-#define DoC_is_Millennium(doc) (doc->ChipID == DOC_ChipID_DocMil)
-#else
-#define DoC_is_Millennium(doc) (0)
-#endif
-
-/* #define ECC_DEBUG */
-
-/* I have no idea why some DoC chips can not use memcpy_from|to_io().
- * This may be due to the different revisions of the ASIC controller built-in or
- * simplily a QA/Bug issue. Who knows ?? If you have trouble, please uncomment
- * this:
- #undef USE_MEMCPY
-*/
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf);
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf);
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
- size_t *retlen, const u_char *buf);
-static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
-
-static struct mtd_info *doc2klist = NULL;
-
-/* Perform the required delay cycles by reading from the appropriate register */
-static void DoC_Delay(struct DiskOnChip *doc, unsigned short cycles)
-{
- volatile char dummy;
- int i;
-
- for (i = 0; i < cycles; i++) {
- if (DoC_is_Millennium(doc))
- dummy = ReadDOC(doc->virtadr, NOP);
- else
- dummy = ReadDOC(doc->virtadr, DOCStatus);
- }
-
-}
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(struct DiskOnChip *doc)
-{
- void __iomem *docptr = doc->virtadr;
- unsigned long timeo = jiffies + (HZ * 10);
-
- pr_debug("_DoC_WaitReady called for out-of-line wait\n");
-
- /* Out-of-line routine to wait for chip response */
- while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
- /* issue 2 read from NOP register after reading from CDSNControl register
- see Software Requirement 11.4 item 2. */
- DoC_Delay(doc, 2);
-
- if (time_after(jiffies, timeo)) {
- pr_debug("_DoC_WaitReady timed out.\n");
- return -EIO;
- }
- udelay(1);
- cond_resched();
- }
-
- return 0;
-}
-
-static inline int DoC_WaitReady(struct DiskOnChip *doc)
-{
- void __iomem *docptr = doc->virtadr;
-
- /* This is inline, to optimise the common case, where it's ready instantly */
- int ret = 0;
-
- /* 4 read form NOP register should be issued in prior to the read from CDSNControl
- see Software Requirement 11.4 item 2. */
- DoC_Delay(doc, 4);
-
- if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
- /* Call the out-of-line routine to wait */
- ret = _DoC_WaitReady(doc);
-
- /* issue 2 read from NOP register after reading from CDSNControl register
- see Software Requirement 11.4 item 2. */
- DoC_Delay(doc, 2);
-
- return ret;
-}
-
-/* DoC_Command: Send a flash command to the flash chip through the CDSN Slow IO register to
- bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
- required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static int DoC_Command(struct DiskOnChip *doc, unsigned char command,
- unsigned char xtraflags)
-{
- void __iomem *docptr = doc->virtadr;
-
- if (DoC_is_2000(doc))
- xtraflags |= CDSN_CTRL_FLASH_IO;
-
- /* Assert the CLE (Command Latch Enable) line to the flash chip */
- WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- if (DoC_is_Millennium(doc))
- WriteDOC(command, docptr, CDSNSlowIO);
-
- /* Send the command */
- WriteDOC_(command, docptr, doc->ioreg);
- if (DoC_is_Millennium(doc))
- WriteDOC(command, docptr, WritePipeTerm);
-
- /* Lower the CLE line */
- WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Wait for the chip to respond - Software requirement 11.4.1 (extended for any command) */
- return DoC_WaitReady(doc);
-}
-
-/* DoC_Address: Set the current address for the flash chip through the CDSN Slow IO register to
- bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
- required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static int DoC_Address(struct DiskOnChip *doc, int numbytes, unsigned long ofs,
- unsigned char xtraflags1, unsigned char xtraflags2)
-{
- int i;
- void __iomem *docptr = doc->virtadr;
-
- if (DoC_is_2000(doc))
- xtraflags1 |= CDSN_CTRL_FLASH_IO;
-
- /* Assert the ALE (Address Latch Enable) line to the flash chip */
- WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl);
-
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Send the address */
- /* Devices with 256-byte page are addressed as:
- Column (bits 0-7), Page (bits 8-15, 16-23, 24-31)
- * there is no device on the market with page256
- and more than 24 bits.
- Devices with 512-byte page are addressed as:
- Column (bits 0-7), Page (bits 9-16, 17-24, 25-31)
- * 25-31 is sent only if the chip support it.
- * bit 8 changes the read command to be sent
- (NAND_CMD_READ0 or NAND_CMD_READ1).
- */
-
- if (numbytes == ADDR_COLUMN || numbytes == ADDR_COLUMN_PAGE) {
- if (DoC_is_Millennium(doc))
- WriteDOC(ofs & 0xff, docptr, CDSNSlowIO);
- WriteDOC_(ofs & 0xff, docptr, doc->ioreg);
- }
-
- if (doc->page256) {
- ofs = ofs >> 8;
- } else {
- ofs = ofs >> 9;
- }
-
- if (numbytes == ADDR_PAGE || numbytes == ADDR_COLUMN_PAGE) {
- for (i = 0; i < doc->pageadrlen; i++, ofs = ofs >> 8) {
- if (DoC_is_Millennium(doc))
- WriteDOC(ofs & 0xff, docptr, CDSNSlowIO);
- WriteDOC_(ofs & 0xff, docptr, doc->ioreg);
- }
- }
-
- if (DoC_is_Millennium(doc))
- WriteDOC(ofs & 0xff, docptr, WritePipeTerm);
-
- DoC_Delay(doc, 2); /* Needed for some slow flash chips. mf. */
-
- /* FIXME: The SlowIO's for millennium could be replaced by
- a single WritePipeTerm here. mf. */
-
- /* Lower the ALE line */
- WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr,
- CDSNControl);
-
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Wait for the chip to respond - Software requirement 11.4.1 */
- return DoC_WaitReady(doc);
-}
-
-/* Read a buffer from DoC, taking care of Millennium odditys */
-static void DoC_ReadBuf(struct DiskOnChip *doc, u_char * buf, int len)
-{
- volatile int dummy;
- int modulus = 0xffff;
- void __iomem *docptr = doc->virtadr;
- int i;
-
- if (len <= 0)
- return;
-
- if (DoC_is_Millennium(doc)) {
- /* Read the data via the internal pipeline through CDSN IO register,
- see Pipelined Read Operations 11.3 */
- dummy = ReadDOC(docptr, ReadPipeInit);
-
- /* Millennium should use the LastDataRead register - Pipeline Reads */
- len--;
-
- /* This is needed for correctly ECC calculation */
- modulus = 0xff;
- }
-
- for (i = 0; i < len; i++)
- buf[i] = ReadDOC_(docptr, doc->ioreg + (i & modulus));
-
- if (DoC_is_Millennium(doc)) {
- buf[i] = ReadDOC(docptr, LastDataRead);
- }
-}
-
-/* Write a buffer to DoC, taking care of Millennium odditys */
-static void DoC_WriteBuf(struct DiskOnChip *doc, const u_char * buf, int len)
-{
- void __iomem *docptr = doc->virtadr;
- int i;
-
- if (len <= 0)
- return;
-
- for (i = 0; i < len; i++)
- WriteDOC_(buf[i], docptr, doc->ioreg + i);
-
- if (DoC_is_Millennium(doc)) {
- WriteDOC(0x00, docptr, WritePipeTerm);
- }
-}
-
-
-/* DoC_SelectChip: Select a given flash chip within the current floor */
-
-static inline int DoC_SelectChip(struct DiskOnChip *doc, int chip)
-{
- void __iomem *docptr = doc->virtadr;
-
- /* Software requirement 11.4.4 before writing DeviceSelect */
- /* Deassert the CE line to eliminate glitches on the FCE# outputs */
- WriteDOC(CDSN_CTRL_WP, docptr, CDSNControl);
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Select the individual flash chip requested */
- WriteDOC(chip, docptr, CDSNDeviceSelect);
- DoC_Delay(doc, 4);
-
- /* Reassert the CE line */
- WriteDOC(CDSN_CTRL_CE | CDSN_CTRL_FLASH_IO | CDSN_CTRL_WP, docptr,
- CDSNControl);
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Wait for it to be ready */
- return DoC_WaitReady(doc);
-}
-
-/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
-
-static inline int DoC_SelectFloor(struct DiskOnChip *doc, int floor)
-{
- void __iomem *docptr = doc->virtadr;
-
- /* Select the floor (bank) of chips required */
- WriteDOC(floor, docptr, FloorSelect);
-
- /* Wait for the chip to be ready */
- return DoC_WaitReady(doc);
-}
-
-/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
-
-static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
-{
- int mfr, id, i, j;
- volatile char dummy;
-
- /* Page in the required floor/chip */
- DoC_SelectFloor(doc, floor);
- DoC_SelectChip(doc, chip);
-
- /* Reset the chip */
- if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) {
- pr_debug("DoC_Command (reset) for %d,%d returned true\n",
- floor, chip);
- return 0;
- }
-
-
- /* Read the NAND chip ID: 1. Send ReadID command */
- if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) {
- pr_debug("DoC_Command (ReadID) for %d,%d returned true\n",
- floor, chip);
- return 0;
- }
-
- /* Read the NAND chip ID: 2. Send address byte zero */
- DoC_Address(doc, ADDR_COLUMN, 0, CDSN_CTRL_WP, 0);
-
- /* Read the manufacturer and device id codes from the device */
-
- if (DoC_is_Millennium(doc)) {
- DoC_Delay(doc, 2);
- dummy = ReadDOC(doc->virtadr, ReadPipeInit);
- mfr = ReadDOC(doc->virtadr, LastDataRead);
-
- DoC_Delay(doc, 2);
- dummy = ReadDOC(doc->virtadr, ReadPipeInit);
- id = ReadDOC(doc->virtadr, LastDataRead);
- } else {
- /* CDSN Slow IO register see Software Req 11.4 item 5. */
- dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
- DoC_Delay(doc, 2);
- mfr = ReadDOC_(doc->virtadr, doc->ioreg);
-
- /* CDSN Slow IO register see Software Req 11.4 item 5. */
- dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
- DoC_Delay(doc, 2);
- id = ReadDOC_(doc->virtadr, doc->ioreg);
- }
-
- /* No response - return failure */
- if (mfr == 0xff || mfr == 0)
- return 0;
-
- /* Check it's the same as the first chip we identified.
- * M-Systems say that any given DiskOnChip device should only
- * contain _one_ type of flash part, although that's not a
- * hardware restriction. */
- if (doc->mfr) {
- if (doc->mfr == mfr && doc->id == id)
- return 1; /* This is the same as the first */
- else
- printk(KERN_WARNING
- "Flash chip at floor %d, chip %d is different:\n",
- floor, chip);
- }
-
- /* Print and store the manufacturer and ID codes. */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (id == nand_flash_ids[i].id) {
- /* Try to identify manufacturer */
- for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
- if (nand_manuf_ids[j].id == mfr)
- break;
- }
- printk(KERN_INFO
- "Flash chip found: Manufacturer ID: %2.2X, "
- "Chip ID: %2.2X (%s:%s)\n", mfr, id,
- nand_manuf_ids[j].name, nand_flash_ids[i].name);
- if (!doc->mfr) {
- doc->mfr = mfr;
- doc->id = id;
- doc->chipshift =
- ffs((nand_flash_ids[i].chipsize << 20)) - 1;
- doc->page256 = (nand_flash_ids[i].pagesize == 256) ? 1 : 0;
- doc->pageadrlen = doc->chipshift > 25 ? 3 : 2;
- doc->erasesize =
- nand_flash_ids[i].erasesize;
- return 1;
- }
- return 0;
- }
- }
-
-
- /* We haven't fully identified the chip. Print as much as we know. */
- printk(KERN_WARNING "Unknown flash chip found: %2.2X %2.2X\n",
- id, mfr);
-
- printk(KERN_WARNING "Please report to dwmw2@infradead.org\n");
- return 0;
-}
-
-/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-
-static void DoC_ScanChips(struct DiskOnChip *this, int maxchips)
-{
- int floor, chip;
- int numchips[MAX_FLOORS];
- int ret = 1;
-
- this->numchips = 0;
- this->mfr = 0;
- this->id = 0;
-
- /* For each floor, find the number of valid chips it contains */
- for (floor = 0; floor < MAX_FLOORS; floor++) {
- ret = 1;
- numchips[floor] = 0;
- for (chip = 0; chip < maxchips && ret != 0; chip++) {
-
- ret = DoC_IdentChip(this, floor, chip);
- if (ret) {
- numchips[floor]++;
- this->numchips++;
- }
- }
- }
-
- /* If there are none at all that we recognise, bail */
- if (!this->numchips) {
- printk(KERN_NOTICE "No flash chips recognised.\n");
- return;
- }
-
- /* Allocate an array to hold the information for each chip */
- this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
- if (!this->chips) {
- printk(KERN_NOTICE "No memory for allocating chip info structures\n");
- return;
- }
-
- ret = 0;
-
- /* Fill out the chip array with {floor, chipno} for each
- * detected chip in the device. */
- for (floor = 0; floor < MAX_FLOORS; floor++) {
- for (chip = 0; chip < numchips[floor]; chip++) {
- this->chips[ret].floor = floor;
- this->chips[ret].chip = chip;
- this->chips[ret].curadr = 0;
- this->chips[ret].curmode = 0x50;
- ret++;
- }
- }
-
- /* Calculate and print the total size of the device */
- this->totlen = this->numchips * (1 << this->chipshift);
-
- printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
- this->numchips, this->totlen >> 20);
-}
-
-static int DoC2k_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
-{
- int tmp1, tmp2, retval;
- if (doc1->physadr == doc2->physadr)
- return 1;
-
- /* Use the alias resolution register which was set aside for this
- * purpose. If it's value is the same on both chips, they might
- * be the same chip, and we write to one and check for a change in
- * the other. It's unclear if this register is usuable in the
- * DoC 2000 (it's in the Millennium docs), but it seems to work. */
- tmp1 = ReadDOC(doc1->virtadr, AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
- if (tmp1 != tmp2)
- return 0;
-
- WriteDOC((tmp1 + 1) % 0xff, doc1->virtadr, AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
- if (tmp2 == (tmp1 + 1) % 0xff)
- retval = 1;
- else
- retval = 0;
-
- /* Restore register contents. May not be necessary, but do it just to
- * be safe. */
- WriteDOC(tmp1, doc1->virtadr, AliasResolution);
-
- return retval;
-}
-
-/* This routine is found from the docprobe code by symbol_get(),
- * which will bump the use count of this module. */
-void DoC2k_init(struct mtd_info *mtd)
-{
- struct DiskOnChip *this = mtd->priv;
- struct DiskOnChip *old = NULL;
- int maxchips;
-
- /* We must avoid being called twice for the same device. */
-
- if (doc2klist)
- old = doc2klist->priv;
-
- while (old) {
- if (DoC2k_is_alias(old, this)) {
- printk(KERN_NOTICE
- "Ignoring DiskOnChip 2000 at 0x%lX - already configured\n",
- this->physadr);
- iounmap(this->virtadr);
- kfree(mtd);
- return;
- }
- if (old->nextdoc)
- old = old->nextdoc->priv;
- else
- old = NULL;
- }
-
-
- switch (this->ChipID) {
- case DOC_ChipID_Doc2kTSOP:
- mtd->name = "DiskOnChip 2000 TSOP";
- this->ioreg = DoC_Mil_CDSN_IO;
- /* Pretend it's a Millennium */
- this->ChipID = DOC_ChipID_DocMil;
- maxchips = MAX_CHIPS;
- break;
- case DOC_ChipID_Doc2k:
- mtd->name = "DiskOnChip 2000";
- this->ioreg = DoC_2k_CDSN_IO;
- maxchips = MAX_CHIPS;
- break;
- case DOC_ChipID_DocMil:
- mtd->name = "DiskOnChip Millennium";
- this->ioreg = DoC_Mil_CDSN_IO;
- maxchips = MAX_CHIPS_MIL;
- break;
- default:
- printk("Unknown ChipID 0x%02x\n", this->ChipID);
- kfree(mtd);
- iounmap(this->virtadr);
- return;
- }
-
- printk(KERN_NOTICE "%s found at address 0x%lX\n", mtd->name,
- this->physadr);
-
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
- mtd->writebufsize = mtd->writesize = 512;
- mtd->oobsize = 16;
- mtd->ecc_strength = 2;
- mtd->owner = THIS_MODULE;
- mtd->_erase = doc_erase;
- mtd->_read = doc_read;
- mtd->_write = doc_write;
- mtd->_read_oob = doc_read_oob;
- mtd->_write_oob = doc_write_oob;
- this->curfloor = -1;
- this->curchip = -1;
- mutex_init(&this->lock);
-
- /* Ident all the chips present. */
- DoC_ScanChips(this, maxchips);
-
- if (!this->totlen) {
- kfree(mtd);
- iounmap(this->virtadr);
- } else {
- this->nextdoc = doc2klist;
- doc2klist = mtd;
- mtd->size = this->totlen;
- mtd->erasesize = this->erasesize;
- mtd_device_register(mtd, NULL, 0);
- return;
- }
-}
-EXPORT_SYMBOL_GPL(DoC2k_init);
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t * retlen, u_char * buf)
-{
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip;
- unsigned char syndrome[6], eccbuf[6];
- volatile char dummy;
- int i, len256 = 0, ret=0;
- size_t left = len;
-
- mutex_lock(&this->lock);
- while (left) {
- len = left;
-
- /* Don't allow a single read to cross a 512-byte block boundary */
- if (from + len > ((from | 0x1ff) + 1))
- len = ((from | 0x1ff) + 1) - from;
-
- /* The ECC will not be calculated correctly if less than 512 is read */
- if (len != 0x200)
- printk(KERN_WARNING
- "ECC needs a full sector read (adr: %lx size %lx)\n",
- (long) from, (long) len);
-
- /* printk("DoC_Read (adr: %lx size %lx)\n", (long) from, (long) len); */
-
-
- /* Find the chip which is to be used and select it */
- mychip = &this->chips[from >> (this->chipshift)];
-
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
-
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- DoC_Command(this,
- (!this->page256
- && (from & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
- CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP,
- CDSN_CTRL_ECC_IO);
-
- /* Prime the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_EN, docptr, ECCConf);
-
- /* treat crossing 256-byte sector for 2M x 8bits devices */
- if (this->page256 && from + len > (from | 0xff) + 1) {
- len256 = (from | 0xff) + 1 - from;
- DoC_ReadBuf(this, buf, len256);
-
- DoC_Command(this, NAND_CMD_READ0, CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, from + len256,
- CDSN_CTRL_WP, CDSN_CTRL_ECC_IO);
- }
-
- DoC_ReadBuf(this, &buf[len256], len - len256);
-
- /* Let the caller know we completed it */
- *retlen += len;
-
- /* Read the ECC data through the DiskOnChip ECC logic */
- /* Note: this will work even with 2M x 8bit devices as */
- /* they have 8 bytes of OOB per 256 page. mf. */
- DoC_ReadBuf(this, eccbuf, 6);
-
- /* Flush the pipeline */
- if (DoC_is_Millennium(this)) {
- dummy = ReadDOC(docptr, ECCConf);
- dummy = ReadDOC(docptr, ECCConf);
- i = ReadDOC(docptr, ECCConf);
- } else {
- dummy = ReadDOC(docptr, 2k_ECCStatus);
- dummy = ReadDOC(docptr, 2k_ECCStatus);
- i = ReadDOC(docptr, 2k_ECCStatus);
- }
-
- /* Check the ECC Status */
- if (i & 0x80) {
- int nb_errors;
- /* There was an ECC error */
-#ifdef ECC_DEBUG
- printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
-#endif
- /* Read the ECC syndrome through the DiskOnChip ECC
- logic. These syndrome will be all ZERO when there
- is no error */
- for (i = 0; i < 6; i++) {
- syndrome[i] =
- ReadDOC(docptr, ECCSyndrome0 + i);
- }
- nb_errors = doc_decode_ecc(buf, syndrome);
-
-#ifdef ECC_DEBUG
- printk(KERN_ERR "Errors corrected: %x\n", nb_errors);
-#endif
- if (nb_errors < 0) {
- /* We return error, but have actually done the
- read. Not that this can be told to
- user-space, via sys_read(), but at least
- MTD-aware stuff can know about it by
- checking *retlen */
- ret = -EIO;
- }
- }
-
-#ifdef PSYCHO_DEBUG
- printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long)from, eccbuf[0], eccbuf[1], eccbuf[2],
- eccbuf[3], eccbuf[4], eccbuf[5]);
-#endif
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
-
- /* according to 11.4.1, we need to wait for the busy line
- * drop if we read to the end of the page. */
- if(0 == ((from + len) & 0x1ff))
- {
- DoC_WaitReady(this);
- }
-
- from += len;
- left -= len;
- buf += len;
- }
-
- mutex_unlock(&this->lock);
-
- return ret;
-}
-
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t * retlen, const u_char * buf)
-{
- struct DiskOnChip *this = mtd->priv;
- int di; /* Yes, DI is a hangover from when I was disassembling the binary driver */
- void __iomem *docptr = this->virtadr;
- unsigned char eccbuf[6];
- volatile char dummy;
- int len256 = 0;
- struct Nand *mychip;
- size_t left = len;
- int status;
-
- mutex_lock(&this->lock);
- while (left) {
- len = left;
-
- /* Don't allow a single write to cross a 512-byte block boundary */
- if (to + len > ((to | 0x1ff) + 1))
- len = ((to | 0x1ff) + 1) - to;
-
- /* The ECC will not be calculated correctly if less than 512 is written */
-/* DBB-
- if (len != 0x200 && eccbuf)
- printk(KERN_WARNING
- "ECC needs a full sector write (adr: %lx size %lx)\n",
- (long) to, (long) len);
- -DBB */
-
- /* printk("DoC_Write (adr: %lx size %lx)\n", (long) to, (long) len); */
-
- /* Find the chip which is to be used and select it */
- mychip = &this->chips[to >> (this->chipshift)];
-
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
-
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Set device to main plane of flash */
- DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
- DoC_Command(this,
- (!this->page256
- && (to & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
- CDSN_CTRL_WP);
-
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO);
-
- /* Prime the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
-
- /* treat crossing 256-byte sector for 2M x 8bits devices */
- if (this->page256 && to + len > (to | 0xff) + 1) {
- len256 = (to | 0xff) + 1 - to;
- DoC_WriteBuf(this, buf, len256);
-
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
-
- DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
- /* There's an implicit DoC_WaitReady() in DoC_Command */
-
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
-
- if (ReadDOC_(docptr, this->ioreg) & 1) {
- printk(KERN_ERR "Error programming flash\n");
- /* Error in programming */
- *retlen = 0;
- mutex_unlock(&this->lock);
- return -EIO;
- }
-
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, to + len256, 0,
- CDSN_CTRL_ECC_IO);
- }
-
- DoC_WriteBuf(this, &buf[len256], len - len256);
-
- WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr, CDSNControl);
-
- if (DoC_is_Millennium(this)) {
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- } else {
- WriteDOC_(0, docptr, this->ioreg);
- WriteDOC_(0, docptr, this->ioreg);
- WriteDOC_(0, docptr, this->ioreg);
- }
-
- WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_FLASH_IO | CDSN_CTRL_CE, docptr,
- CDSNControl);
-
- /* Read the ECC data through the DiskOnChip ECC logic */
- for (di = 0; di < 6; di++) {
- eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di);
- }
-
- /* Reset the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
-
-#ifdef PSYCHO_DEBUG
- printk
- ("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
-
- DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
- /* There's an implicit DoC_WaitReady() in DoC_Command */
-
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
-
- if (status & 1) {
- printk(KERN_ERR "Error programming flash\n");
- /* Error in programming */
- *retlen = 0;
- mutex_unlock(&this->lock);
- return -EIO;
- }
-
- /* Let the caller know we completed it */
- *retlen += len;
-
- {
- unsigned char x[8];
- size_t dummy;
- int ret;
-
- /* Write the ECC data to flash */
- for (di=0; di<6; di++)
- x[di] = eccbuf[di];
-
- x[6]=0x55;
- x[7]=0x55;
-
- ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x);
- if (ret) {
- mutex_unlock(&this->lock);
- return ret;
- }
- }
-
- to += len;
- left -= len;
- buf += len;
- }
-
- mutex_unlock(&this->lock);
- return 0;
-}
-
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- struct DiskOnChip *this = mtd->priv;
- int len256 = 0, ret;
- struct Nand *mychip;
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
- ofs += ops->ooboffs;
-
- mutex_lock(&this->lock);
-
- mychip = &this->chips[ofs >> this->chipshift];
-
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* update address for 2M x 8bit devices. OOB starts on the second */
- /* page to maintain compatibility with doc_read_ecc. */
- if (this->page256) {
- if (!(ofs & 0x8))
- ofs += 0x100;
- else
- ofs -= 0x8;
- }
-
- DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, ofs, CDSN_CTRL_WP, 0);
-
- /* treat crossing 8-byte OOB data for 2M x 8bit devices */
- /* Note: datasheet says it should automaticaly wrap to the */
- /* next OOB block, but it didn't work here. mf. */
- if (this->page256 && ofs + len > (ofs | 0x7) + 1) {
- len256 = (ofs | 0x7) + 1 - ofs;
- DoC_ReadBuf(this, buf, len256);
-
- DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff),
- CDSN_CTRL_WP, 0);
- }
-
- DoC_ReadBuf(this, &buf[len256], len - len256);
-
- ops->retlen = len;
- /* Reading the full OOB data drops us off of the end of the page,
- * causing the flash device to go into busy mode, so we need
- * to wait until ready 11.4.1 and Toshiba TC58256FT docs */
-
- ret = DoC_WaitReady(this);
-
- mutex_unlock(&this->lock);
- return ret;
-
-}
-
-static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
- size_t * retlen, const u_char * buf)
-{
- struct DiskOnChip *this = mtd->priv;
- int len256 = 0;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- volatile int dummy;
- int status;
-
- // printk("doc_write_oob(%lx, %d): %2.2X %2.2X %2.2X %2.2X ... %2.2X %2.2X .. %2.2X %2.2X\n",(long)ofs, len,
- // buf[0], buf[1], buf[2], buf[3], buf[8], buf[9], buf[14],buf[15]);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* disable the ECC engine */
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
-
- /* issue the Read2 command to set the pointer to the Spare Data Area. */
- DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
-
- /* update address for 2M x 8bit devices. OOB starts on the second */
- /* page to maintain compatibility with doc_read_ecc. */
- if (this->page256) {
- if (!(ofs & 0x8))
- ofs += 0x100;
- else
- ofs -= 0x8;
- }
-
- /* issue the Serial Data In command to initial the Page Program process */
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, ofs, 0, 0);
-
- /* treat crossing 8-byte OOB data for 2M x 8bit devices */
- /* Note: datasheet says it should automaticaly wrap to the */
- /* next OOB block, but it didn't work here. mf. */
- if (this->page256 && ofs + len > (ofs | 0x7) + 1) {
- len256 = (ofs | 0x7) + 1 - ofs;
- DoC_WriteBuf(this, buf, len256);
-
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
- DoC_Command(this, NAND_CMD_STATUS, 0);
- /* DoC_WaitReady() is implicit in DoC_Command */
-
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
-
- if (status & 1) {
- printk(KERN_ERR "Error programming oob data\n");
- /* There was an error */
- *retlen = 0;
- return -EIO;
- }
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff), 0, 0);
- }
-
- DoC_WriteBuf(this, &buf[len256], len - len256);
-
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
- DoC_Command(this, NAND_CMD_STATUS, 0);
- /* DoC_WaitReady() is implicit in DoC_Command */
-
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
-
- if (status & 1) {
- printk(KERN_ERR "Error programming oob data\n");
- /* There was an error */
- *retlen = 0;
- return -EIO;
- }
-
- *retlen = len;
- return 0;
-
-}
-
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- struct DiskOnChip *this = mtd->priv;
- int ret;
-
- BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
- mutex_lock(&this->lock);
- ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len,
- &ops->retlen, ops->oobbuf);
-
- mutex_unlock(&this->lock);
- return ret;
-}
-
-static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- struct DiskOnChip *this = mtd->priv;
- __u32 ofs = instr->addr;
- __u32 len = instr->len;
- volatile int dummy;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip;
- int status;
-
- mutex_lock(&this->lock);
-
- if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) {
- mutex_unlock(&this->lock);
- return -EINVAL;
- }
-
- instr->state = MTD_ERASING;
-
- /* FIXME: Do this in the background. Use timers or schedule_task() */
- while(len) {
- mychip = &this->chips[ofs >> this->chipshift];
-
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- DoC_Command(this, NAND_CMD_ERASE1, 0);
- DoC_Address(this, ADDR_PAGE, ofs, 0, 0);
- DoC_Command(this, NAND_CMD_ERASE2, 0);
-
- DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
-
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
-
- if (status & 1) {
- printk(KERN_ERR "Error erasing at 0x%x\n", ofs);
- /* There was an error */
- instr->state = MTD_ERASE_FAILED;
- goto callback;
- }
- ofs += mtd->erasesize;
- len -= mtd->erasesize;
- }
- instr->state = MTD_ERASE_DONE;
-
- callback:
- mtd_erase_callback(instr);
-
- mutex_unlock(&this->lock);
- return 0;
-}
-
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static void __exit cleanup_doc2000(void)
-{
- struct mtd_info *mtd;
- struct DiskOnChip *this;
-
- while ((mtd = doc2klist)) {
- this = mtd->priv;
- doc2klist = this->nextdoc;
-
- mtd_device_unregister(mtd);
-
- iounmap(this->virtadr);
- kfree(this->chips);
- kfree(mtd);
- }
-}
-
-module_exit(cleanup_doc2000);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
-MODULE_DESCRIPTION("MTD driver for DiskOnChip 2000 and Millennium");
-
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
deleted file mode 100644
index f692795..0000000
--- a/drivers/mtd/devices/doc2001.c
+++ /dev/null
@@ -1,824 +0,0 @@
-
-/*
- * Linux driver for Disk-On-Chip Millennium
- * (c) 1999 Machine Vision Holdings, Inc.
- * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-/* #define ECC_DEBUG */
-
-/* I have no idea why some DoC chips can not use memcop_form|to_io().
- * This may be due to the different revisions of the ASIC controller built-in or
- * simplily a QA/Bug issue. Who knows ?? If you have trouble, please uncomment
- * this:*/
-#undef USE_MEMCPY
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf);
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf);
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
-
-static struct mtd_info *docmillist = NULL;
-
-/* Perform the required delay cycles by reading from the NOP register */
-static void DoC_Delay(void __iomem * docptr, unsigned short cycles)
-{
- volatile char dummy;
- int i;
-
- for (i = 0; i < cycles; i++)
- dummy = ReadDOC(docptr, NOP);
-}
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(void __iomem * docptr)
-{
- unsigned short c = 0xffff;
-
- pr_debug("_DoC_WaitReady called for out-of-line wait\n");
-
- /* Out-of-line routine to wait for chip response */
- while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c)
- ;
-
- if (c == 0)
- pr_debug("_DoC_WaitReady timed out.\n");
-
- return (c == 0);
-}
-
-static inline int DoC_WaitReady(void __iomem * docptr)
-{
- /* This is inline, to optimise the common case, where it's ready instantly */
- int ret = 0;
-
- /* 4 read form NOP register should be issued in prior to the read from CDSNControl
- see Software Requirement 11.4 item 2. */
- DoC_Delay(docptr, 4);
-
- if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
- /* Call the out-of-line routine to wait */
- ret = _DoC_WaitReady(docptr);
-
- /* issue 2 read from NOP register after reading from CDSNControl register
- see Software Requirement 11.4 item 2. */
- DoC_Delay(docptr, 2);
-
- return ret;
-}
-
-/* DoC_Command: Send a flash command to the flash chip through the CDSN IO register
- with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
- required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static void DoC_Command(void __iomem * docptr, unsigned char command,
- unsigned char xtraflags)
-{
- /* Assert the CLE (Command Latch Enable) line to the flash chip */
- WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(docptr, 4);
-
- /* Send the command */
- WriteDOC(command, docptr, Mil_CDSN_IO);
- WriteDOC(0x00, docptr, WritePipeTerm);
-
- /* Lower the CLE line */
- WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(docptr, 4);
-}
-
-/* DoC_Address: Set the current address for the flash chip through the CDSN IO register
- with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
- required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static inline void DoC_Address(void __iomem * docptr, int numbytes, unsigned long ofs,
- unsigned char xtraflags1, unsigned char xtraflags2)
-{
- /* Assert the ALE (Address Latch Enable) line to the flash chip */
- WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(docptr, 4);
-
- /* Send the address */
- switch (numbytes)
- {
- case 1:
- /* Send single byte, bits 0-7. */
- WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC(0x00, docptr, WritePipeTerm);
- break;
- case 2:
- /* Send bits 9-16 followed by 17-23 */
- WriteDOC((ofs >> 9) & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC(0x00, docptr, WritePipeTerm);
- break;
- case 3:
- /* Send 0-7, 9-16, then 17-23 */
- WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC((ofs >> 9) & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC(0x00, docptr, WritePipeTerm);
- break;
- default:
- return;
- }
-
- /* Lower the ALE line */
- WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(docptr, 4);
-}
-
-/* DoC_SelectChip: Select a given flash chip within the current floor */
-static int DoC_SelectChip(void __iomem * docptr, int chip)
-{
- /* Select the individual flash chip requested */
- WriteDOC(chip, docptr, CDSNDeviceSelect);
- DoC_Delay(docptr, 4);
-
- /* Wait for it to be ready */
- return DoC_WaitReady(docptr);
-}
-
-/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
-static int DoC_SelectFloor(void __iomem * docptr, int floor)
-{
- /* Select the floor (bank) of chips required */
- WriteDOC(floor, docptr, FloorSelect);
-
- /* Wait for the chip to be ready */
- return DoC_WaitReady(docptr);
-}
-
-/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
-static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
-{
- int mfr, id, i, j;
- volatile char dummy;
-
- /* Page in the required floor/chip
- FIXME: is this supported by Millennium ?? */
- DoC_SelectFloor(doc->virtadr, floor);
- DoC_SelectChip(doc->virtadr, chip);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(doc->virtadr, NAND_CMD_RESET, CDSN_CTRL_WP);
- DoC_WaitReady(doc->virtadr);
-
- /* Read the NAND chip ID: 1. Send ReadID command */
- DoC_Command(doc->virtadr, NAND_CMD_READID, CDSN_CTRL_WP);
-
- /* Read the NAND chip ID: 2. Send address byte zero */
- DoC_Address(doc->virtadr, 1, 0x00, CDSN_CTRL_WP, 0x00);
-
- /* Read the manufacturer and device id codes of the flash device through
- CDSN IO register see Software Requirement 11.4 item 5.*/
- dummy = ReadDOC(doc->virtadr, ReadPipeInit);
- DoC_Delay(doc->virtadr, 2);
- mfr = ReadDOC(doc->virtadr, Mil_CDSN_IO);
-
- DoC_Delay(doc->virtadr, 2);
- id = ReadDOC(doc->virtadr, Mil_CDSN_IO);
- dummy = ReadDOC(doc->virtadr, LastDataRead);
-
- /* No response - return failure */
- if (mfr == 0xff || mfr == 0)
- return 0;
-
- /* FIXME: to deal with multi-flash on multi-Millennium case more carefully */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if ( id == nand_flash_ids[i].id) {
- /* Try to identify manufacturer */
- for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
- if (nand_manuf_ids[j].id == mfr)
- break;
- }
- printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, "
- "Chip ID: %2.2X (%s:%s)\n",
- mfr, id, nand_manuf_ids[j].name, nand_flash_ids[i].name);
- doc->mfr = mfr;
- doc->id = id;
- doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
- break;
- }
- }
-
- if (nand_flash_ids[i].name == NULL)
- return 0;
- else
- return 1;
-}
-
-/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-static void DoC_ScanChips(struct DiskOnChip *this)
-{
- int floor, chip;
- int numchips[MAX_FLOORS_MIL];
- int ret;
-
- this->numchips = 0;
- this->mfr = 0;
- this->id = 0;
-
- /* For each floor, find the number of valid chips it contains */
- for (floor = 0,ret = 1; floor < MAX_FLOORS_MIL; floor++) {
- numchips[floor] = 0;
- for (chip = 0; chip < MAX_CHIPS_MIL && ret != 0; chip++) {
- ret = DoC_IdentChip(this, floor, chip);
- if (ret) {
- numchips[floor]++;
- this->numchips++;
- }
- }
- }
- /* If there are none at all that we recognise, bail */
- if (!this->numchips) {
- printk("No flash chips recognised.\n");
- return;
- }
-
- /* Allocate an array to hold the information for each chip */
- this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
- if (!this->chips){
- printk("No memory for allocating chip info structures\n");
- return;
- }
-
- /* Fill out the chip array with {floor, chipno} for each
- * detected chip in the device. */
- for (floor = 0, ret = 0; floor < MAX_FLOORS_MIL; floor++) {
- for (chip = 0 ; chip < numchips[floor] ; chip++) {
- this->chips[ret].floor = floor;
- this->chips[ret].chip = chip;
- this->chips[ret].curadr = 0;
- this->chips[ret].curmode = 0x50;
- ret++;
- }
- }
-
- /* Calculate and print the total size of the device */
- this->totlen = this->numchips * (1 << this->chipshift);
- printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
- this->numchips ,this->totlen >> 20);
-}
-
-static int DoCMil_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
-{
- int tmp1, tmp2, retval;
-
- if (doc1->physadr == doc2->physadr)
- return 1;
-
- /* Use the alias resolution register which was set aside for this
- * purpose. If it's value is the same on both chips, they might
- * be the same chip, and we write to one and check for a change in
- * the other. It's unclear if this register is usuable in the
- * DoC 2000 (it's in the Millenium docs), but it seems to work. */
- tmp1 = ReadDOC(doc1->virtadr, AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
- if (tmp1 != tmp2)
- return 0;
-
- WriteDOC((tmp1+1) % 0xff, doc1->virtadr, AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
- if (tmp2 == (tmp1+1) % 0xff)
- retval = 1;
- else
- retval = 0;
-
- /* Restore register contents. May not be necessary, but do it just to
- * be safe. */
- WriteDOC(tmp1, doc1->virtadr, AliasResolution);
-
- return retval;
-}
-
-/* This routine is found from the docprobe code by symbol_get(),
- * which will bump the use count of this module. */
-void DoCMil_init(struct mtd_info *mtd)
-{
- struct DiskOnChip *this = mtd->priv;
- struct DiskOnChip *old = NULL;
-
- /* We must avoid being called twice for the same device. */
- if (docmillist)
- old = docmillist->priv;
-
- while (old) {
- if (DoCMil_is_alias(this, old)) {
- printk(KERN_NOTICE "Ignoring DiskOnChip Millennium at "
- "0x%lX - already configured\n", this->physadr);
- iounmap(this->virtadr);
- kfree(mtd);
- return;
- }
- if (old->nextdoc)
- old = old->nextdoc->priv;
- else
- old = NULL;
- }
-
- mtd->name = "DiskOnChip Millennium";
- printk(KERN_NOTICE "DiskOnChip Millennium found at address 0x%lX\n",
- this->physadr);
-
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
-
- /* FIXME: erase size is not always 8KiB */
- mtd->erasesize = 0x2000;
- mtd->writebufsize = mtd->writesize = 512;
- mtd->oobsize = 16;
- mtd->ecc_strength = 2;
- mtd->owner = THIS_MODULE;
- mtd->_erase = doc_erase;
- mtd->_read = doc_read;
- mtd->_write = doc_write;
- mtd->_read_oob = doc_read_oob;
- mtd->_write_oob = doc_write_oob;
- this->curfloor = -1;
- this->curchip = -1;
-
- /* Ident all the chips present. */
- DoC_ScanChips(this);
-
- if (!this->totlen) {
- kfree(mtd);
- iounmap(this->virtadr);
- } else {
- this->nextdoc = docmillist;
- docmillist = mtd;
- mtd->size = this->totlen;
- mtd_device_register(mtd, NULL, 0);
- return;
- }
-}
-EXPORT_SYMBOL_GPL(DoCMil_init);
-
-static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- int i, ret;
- volatile char dummy;
- unsigned char syndrome[6], eccbuf[6];
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[from >> (this->chipshift)];
-
- /* Don't allow a single read to cross a 512-byte block boundary */
- if (from + len > ((from | 0x1ff) + 1))
- len = ((from | 0x1ff) + 1) - from;
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* issue the Read0 or Read1 command depend on which half of the page
- we are accessing. Polling the Flash Ready bit after issue 3 bytes
- address in Sequence Read Mode, see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, (from >> 8) & 1, CDSN_CTRL_WP);
- DoC_Address(docptr, 3, from, CDSN_CTRL_WP, 0x00);
- DoC_WaitReady(docptr);
-
- /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_EN, docptr, ECCConf);
-
- /* Read the data via the internal pipeline through CDSN IO register,
- see Pipelined Read Operations 11.3 */
- dummy = ReadDOC(docptr, ReadPipeInit);
-#ifndef USE_MEMCPY
- for (i = 0; i < len-1; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
- }
-#else
- memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1);
-#endif
- buf[len - 1] = ReadDOC(docptr, LastDataRead);
-
- /* Let the caller know we completed it */
- *retlen = len;
- ret = 0;
-
- /* Read the ECC data from Spare Data Area,
- see Reed-Solomon EDC/ECC 11.1 */
- dummy = ReadDOC(docptr, ReadPipeInit);
-#ifndef USE_MEMCPY
- for (i = 0; i < 5; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- eccbuf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_fromio(eccbuf, docptr + DoC_Mil_CDSN_IO, 5);
-#endif
- eccbuf[5] = ReadDOC(docptr, LastDataRead);
-
- /* Flush the pipeline */
- dummy = ReadDOC(docptr, ECCConf);
- dummy = ReadDOC(docptr, ECCConf);
-
- /* Check the ECC Status */
- if (ReadDOC(docptr, ECCConf) & 0x80) {
- int nb_errors;
- /* There was an ECC error */
-#ifdef ECC_DEBUG
- printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
-#endif
- /* Read the ECC syndrome through the DiskOnChip ECC logic.
- These syndrome will be all ZERO when there is no error */
- for (i = 0; i < 6; i++) {
- syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i);
- }
- nb_errors = doc_decode_ecc(buf, syndrome);
-#ifdef ECC_DEBUG
- printk("ECC Errors corrected: %x\n", nb_errors);
-#endif
- if (nb_errors < 0) {
- /* We return error, but have actually done the read. Not that
- this can be told to user-space, via sys_read(), but at least
- MTD-aware stuff can know about it by checking *retlen */
- ret = -EIO;
- }
- }
-
-#ifdef PSYCHO_DEBUG
- printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
-
- return ret;
-}
-
-static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- int i,ret = 0;
- char eccbuf[6];
- volatile char dummy;
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[to >> (this->chipshift)];
-
-#if 0
- /* Don't allow a single write to cross a 512-byte block boundary */
- if (to + len > ( (to | 0x1ff) + 1))
- len = ((to | 0x1ff) + 1) - to;
-#else
- /* Don't allow writes which aren't exactly one block */
- if (to & 0x1ff || len != 0x200)
- return -EINVAL;
-#endif
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0x00);
- DoC_WaitReady(docptr);
- /* Set device to main plane of flash */
- DoC_Command(docptr, NAND_CMD_READ0, 0x00);
-
-	/* issue the Serial Data In command to initiate the Page Program process */
- DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
- DoC_Address(docptr, 3, to, 0x00, 0x00);
- DoC_WaitReady(docptr);
-
- /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
-
- /* Write the data via the internal pipeline through CDSN IO register,
- see Pipelined Write Operations 11.2 */
-#ifndef USE_MEMCPY
- for (i = 0; i < len; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
-#endif
- WriteDOC(0x00, docptr, WritePipeTerm);
-
- /* Write ECC data to flash, the ECC info is generated by the DiskOnChip ECC logic
- see Reed-Solomon EDC/ECC 11.1 */
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
-
- /* Read the ECC data through the DiskOnChip ECC logic */
- for (i = 0; i < 6; i++) {
- eccbuf[i] = ReadDOC(docptr, ECCSyndrome0 + i);
- }
-
-	/* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
-
-#ifndef USE_MEMCPY
- /* Write the ECC data to flash */
- for (i = 0; i < 6; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- WriteDOC(eccbuf[i], docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_toio(docptr + DoC_Mil_CDSN_IO, eccbuf, 6);
-#endif
-
-	/* write the block status BLOCK_USED (0x5555) at the end of ECC data
-	   FIXME: this is only a hack for programming the IPL area for LinuxBIOS
-	   and should be replaced with proper code in user space utilities */
- WriteDOC(0x55, docptr, Mil_CDSN_IO);
- WriteDOC(0x55, docptr, Mil_CDSN_IO + 1);
-
- WriteDOC(0x00, docptr, WritePipeTerm);
-
-#ifdef PSYCHO_DEBUG
- printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
-
- /* Commit the Page Program command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.*/
- DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP);
- dummy = ReadDOC(docptr, ReadPipeInit);
- DoC_Delay(docptr, 2);
- if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
- printk("Error programming flash\n");
- /* Error in programming
- FIXME: implement Bad Block Replacement (in nftl.c ??) */
- ret = -EIO;
- }
- dummy = ReadDOC(docptr, LastDataRead);
-
- /* Let the caller know we completed it */
- *retlen = len;
-
- return ret;
-}
-
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
-#ifndef USE_MEMCPY
- int i;
-#endif
- volatile char dummy;
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
- ofs += ops->ooboffs;
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* disable the ECC engine */
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
-
-	/* issue the Read2 command to set the pointer to the Spare Data Area.
-	   Poll the Flash Ready bit after issuing the 3 address bytes in
-	   Sequence Read Mode, see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP);
- DoC_Address(docptr, 3, ofs, CDSN_CTRL_WP, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the data out via the internal pipeline through CDSN IO register,
- see Pipelined Read Operations 11.3 */
- dummy = ReadDOC(docptr, ReadPipeInit);
-#ifndef USE_MEMCPY
- for (i = 0; i < len-1; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1);
-#endif
- buf[len - 1] = ReadDOC(docptr, LastDataRead);
-
- ops->retlen = len;
-
- return 0;
-}
-
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
-#ifndef USE_MEMCPY
- int i;
-#endif
- volatile char dummy;
- int ret = 0;
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
- ofs += ops->ooboffs;
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* disable the ECC engine */
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, CDSN_CTRL_WP);
- DoC_WaitReady(docptr);
- /* issue the Read2 command to set the pointer to the Spare Data Area. */
- DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP);
-
-	/* issue the Serial Data In command to initiate the Page Program process */
- DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
- DoC_Address(docptr, 3, ofs, 0x00, 0x00);
-
- /* Write the data via the internal pipeline through CDSN IO register,
- see Pipelined Write Operations 11.2 */
-#ifndef USE_MEMCPY
- for (i = 0; i < len; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
-#endif
- WriteDOC(0x00, docptr, WritePipeTerm);
-
- /* Commit the Page Program command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.*/
- DoC_Command(docptr, NAND_CMD_STATUS, 0x00);
- dummy = ReadDOC(docptr, ReadPipeInit);
- DoC_Delay(docptr, 2);
- if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
- printk("Error programming oob data\n");
- /* FIXME: implement Bad Block Replacement (in nftl.c ??) */
- ops->retlen = 0;
- ret = -EIO;
- }
- dummy = ReadDOC(docptr, LastDataRead);
-
- ops->retlen = len;
-
- return ret;
-}
-
-int doc_erase (struct mtd_info *mtd, struct erase_info *instr)
-{
- volatile char dummy;
- struct DiskOnChip *this = mtd->priv;
- __u32 ofs = instr->addr;
- __u32 len = instr->len;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
-
- if (len != mtd->erasesize)
-		printk(KERN_WARNING "Erase not right size (%x != %x)\n",
- len, mtd->erasesize);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- instr->state = MTD_ERASE_PENDING;
-
- /* issue the Erase Setup command */
- DoC_Command(docptr, NAND_CMD_ERASE1, 0x00);
- DoC_Address(docptr, 2, ofs, 0x00, 0x00);
-
- /* Commit the Erase Start command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_ERASE2, 0x00);
- DoC_WaitReady(docptr);
-
- instr->state = MTD_ERASING;
-
-	/* Read the status of the flash device through CDSN IO register
-	   see Software Requirement 11.4 item 5.
-	   FIXME: it seems that we do not wait long enough, some blocks are not
-	   erased fully */
- DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP);
- dummy = ReadDOC(docptr, ReadPipeInit);
- DoC_Delay(docptr, 2);
- if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
- printk("Error Erasing at 0x%x\n", ofs);
- /* There was an error
- FIXME: implement Bad Block Replacement (in nftl.c ??) */
- instr->state = MTD_ERASE_FAILED;
- } else
- instr->state = MTD_ERASE_DONE;
- dummy = ReadDOC(docptr, LastDataRead);
-
- mtd_erase_callback(instr);
-
- return 0;
-}
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static void __exit cleanup_doc2001(void)
-{
- struct mtd_info *mtd;
- struct DiskOnChip *this;
-
- while ((mtd=docmillist)) {
- this = mtd->priv;
- docmillist = this->nextdoc;
-
- mtd_device_unregister(mtd);
-
- iounmap(this->virtadr);
- kfree(this->chips);
- kfree(mtd);
- }
-}
-
-module_exit(cleanup_doc2001);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
-MODULE_DESCRIPTION("Alternative driver for DiskOnChip Millennium");
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
deleted file mode 100644
index 4f2220a..0000000
--- a/drivers/mtd/devices/doc2001plus.c
+++ /dev/null
@@ -1,1080 +0,0 @@
-/*
- * Linux driver for Disk-On-Chip Millennium Plus
- *
- * (c) 2002-2003 Greg Ungerer <gerg@snapgear.com>
- * (c) 2002-2003 SnapGear Inc
- * (c) 1999 Machine Vision Holdings, Inc.
- * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- *
- * Released under GPL
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-/* #define ECC_DEBUG */
-
-/* I have no idea why some DoC chips can not use memcpy_from|to_io().
- * This may be due to the different revisions of the ASIC controller built-in or
- * simply a QA/Bug issue. Who knows ?? If you have trouble, please uncomment
- * this:*/
-#undef USE_MEMCPY
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf);
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf);
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
-
-static struct mtd_info *docmilpluslist = NULL;
-
-
-/* Perform the required delay cycles by writing to the NOP register */
-static void DoC_Delay(void __iomem * docptr, int cycles)
-{
- int i;
-
- for (i = 0; (i < cycles); i++)
- WriteDOC(0, docptr, Mplus_NOP);
-}
-
-#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(void __iomem * docptr)
-{
- unsigned int c = 0xffff;
-
- pr_debug("_DoC_WaitReady called for out-of-line wait\n");
-
- /* Out-of-line routine to wait for chip response */
- while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c)
- ;
-
- if (c == 0)
- pr_debug("_DoC_WaitReady timed out.\n");
-
- return (c == 0);
-}
-
-static inline int DoC_WaitReady(void __iomem * docptr)
-{
- /* This is inline, to optimise the common case, where it's ready instantly */
- int ret = 0;
-
-	/* a read from the NOP register should be issued prior to the read from CDSNControl
- see Software Requirement 11.4 item 2. */
- DoC_Delay(docptr, 4);
-
- if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
- /* Call the out-of-line routine to wait */
- ret = _DoC_WaitReady(docptr);
-
- return ret;
-}
-
-/* For some reason the Millennium Plus seems to occasionally put itself
- * into reset mode. For me this happens randomly, with no pattern that I
- * can detect. M-systems suggest always checking this on any block level
- * operation and setting it back to normal mode if in reset mode.
- */
-static inline void DoC_CheckASIC(void __iomem * docptr)
-{
- /* Make sure the DoC is in normal mode */
- if ((ReadDOC(docptr, Mplus_DOCControl) & DOC_MODE_NORMAL) == 0) {
- WriteDOC((DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_DOCControl);
- WriteDOC(~(DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_CtrlConfirm);
- }
-}
-
-/* DoC_Command: Send a flash command to the flash chip through the Flash
- * command register. Need 2 Write Pipeline Terminates to complete send.
- */
-static void DoC_Command(void __iomem * docptr, unsigned char command,
- unsigned char xtraflags)
-{
- WriteDOC(command, docptr, Mplus_FlashCmd);
- WriteDOC(command, docptr, Mplus_WritePipeTerm);
- WriteDOC(command, docptr, Mplus_WritePipeTerm);
-}
-
-/* DoC_Address: Set the current address for the flash chip through the Flash
- * Address register. Need 2 Write Pipeline Terminates to complete send.
- */
-static inline void DoC_Address(struct DiskOnChip *doc, int numbytes,
- unsigned long ofs, unsigned char xtraflags1,
- unsigned char xtraflags2)
-{
- void __iomem * docptr = doc->virtadr;
-
- /* Allow for possible Mill Plus internal flash interleaving */
- ofs >>= doc->interleave;
-
- switch (numbytes) {
- case 1:
- /* Send single byte, bits 0-7. */
- WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress);
- break;
- case 2:
- /* Send bits 9-16 followed by 17-23 */
- WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress);
- WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress);
- break;
- case 3:
- /* Send 0-7, 9-16, then 17-23 */
- WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress);
- WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress);
- WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress);
- break;
- default:
- return;
- }
-
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-}
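
The three flash-address writes above follow the usual 512-byte-page NAND addressing: a column byte (bits 0-7) and two row bytes (bits 9-16 and 17 upwards). Bit 8 is not sent as an address cycle; it selects the page half via the Read0/Read1 command, which is consistent with how DoC_GetDataOffset() below picks READ0 or READ1 from bit 8. A small sketch of the same bit-slicing, offered as an illustration rather than driver code:

#include <stdio.h>

/* Split an (already interleave-shifted) offset into the three address
 * bytes written in the 3-byte case above; bit 8 is intentionally skipped. */
static void split_address(unsigned long ofs, unsigned char byte[3])
{
	byte[0] = ofs & 0xff;		/* column address, bits 0-7 */
	byte[1] = (ofs >> 9) & 0xff;	/* row address low, bits 9-16 */
	byte[2] = (ofs >> 17) & 0xff;	/* row address high, bits 17-24 */
}

int main(void)
{
	unsigned char b[3];

	split_address(0x23456, b);
	printf("%02x %02x %02x\n", b[0], b[1], b[2]);	/* prints "56 1a 01" */
	return 0;
}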
-
-/* DoC_SelectChip: Select a given flash chip within the current floor */
-static int DoC_SelectChip(void __iomem * docptr, int chip)
-{
- /* No choice for flash chip on Millennium Plus */
- return 0;
-}
-
-/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
-static int DoC_SelectFloor(void __iomem * docptr, int floor)
-{
- WriteDOC((floor & 0x3), docptr, Mplus_DeviceSelect);
- return 0;
-}
-
-/*
- * Translate the given offset into the appropriate command and offset.
- * This does the mapping using the 16bit interleave layout defined by
- * M-Systems, and looks like this for a sector pair:
- * +-----------+-------+-------+-------+--------------+---------+-----------+
- * | 0 --- 511 |512-517|518-519|520-521| 522 --- 1033 |1034-1039|1040 - 1055|
- * +-----------+-------+-------+-------+--------------+---------+-----------+
- * | Data 0 | ECC 0 |Flags0 |Flags1 | Data 1 |ECC 1 | OOB 1 + 2 |
- * +-----------+-------+-------+-------+--------------+---------+-----------+
- */
-/* FIXME: This lives in INFTL not here. Other users of flash devices
- may not want it */
-static unsigned int DoC_GetDataOffset(struct mtd_info *mtd, loff_t *from)
-{
- struct DiskOnChip *this = mtd->priv;
-
- if (this->interleave) {
- unsigned int ofs = *from & 0x3ff;
- unsigned int cmd;
-
- if (ofs < 512) {
- cmd = NAND_CMD_READ0;
- ofs &= 0x1ff;
- } else if (ofs < 1014) {
- cmd = NAND_CMD_READ1;
- ofs = (ofs & 0x1ff) + 10;
- } else {
- cmd = NAND_CMD_READOOB;
- ofs = ofs - 1014;
- }
-
- *from = (*from & ~0x3ff) | ofs;
- return cmd;
- } else {
- /* No interleave */
- if ((*from) & 0x100)
- return NAND_CMD_READ1;
- return NAND_CMD_READ0;
- }
-}
-
-static unsigned int DoC_GetECCOffset(struct mtd_info *mtd, loff_t *from)
-{
- unsigned int ofs, cmd;
-
- if (*from & 0x200) {
- cmd = NAND_CMD_READOOB;
- ofs = 10 + (*from & 0xf);
- } else {
- cmd = NAND_CMD_READ1;
- ofs = (*from & 0xf);
- }
-
- *from = (*from & ~0x3ff) | ofs;
- return cmd;
-}
-
-static unsigned int DoC_GetFlagsOffset(struct mtd_info *mtd, loff_t *from)
-{
- unsigned int ofs, cmd;
-
- cmd = NAND_CMD_READ1;
- ofs = (*from & 0x200) ? 8 : 6;
- *from = (*from & ~0x3ff) | ofs;
- return cmd;
-}
-
-static unsigned int DoC_GetHdrOffset(struct mtd_info *mtd, loff_t *from)
-{
- unsigned int ofs, cmd;
-
- cmd = NAND_CMD_READOOB;
- ofs = (*from & 0x200) ? 24 : 16;
- *from = (*from & ~0x3ff) | ofs;
- return cmd;
-}
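
The four helpers above implement the 16-bit interleave layout from the table: data, ECC, flag and header bytes sit at different physical offsets within a 1056-byte sector pair. A self-contained sketch of the data-offset branch, so the mapping can be checked in isolation (the enum and function names are illustrative, not part of the driver):

#include <stdio.h>

enum { CMD_READ0, CMD_READ1, CMD_READOOB };

/* Mirror of the interleaved branch of DoC_GetDataOffset(): map a logical
 * offset inside a sector pair onto the layout shown in the table above. */
static int map_data_offset(unsigned int ofs, unsigned int *phys)
{
	ofs &= 0x3ff;

	if (ofs < 512) {
		*phys = ofs & 0x1ff;		/* Data 0 */
		return CMD_READ0;
	} else if (ofs < 1014) {
		*phys = (ofs & 0x1ff) + 10;	/* Data 1: skip ECC 0 + flag bytes */
		return CMD_READ1;
	}
	*phys = ofs - 1014;			/* trailing OOB area */
	return CMD_READOOB;
}

int main(void)
{
	unsigned int phys;
	int cmd = map_data_offset(700, &phys);

	printf("cmd=%d phys=%u\n", cmd, phys);	/* cmd=1 (READ1), phys=198 */
	return 0;
}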
-
-static inline void MemReadDOC(void __iomem * docptr, unsigned char *buf, int len)
-{
-#ifndef USE_MEMCPY
- int i;
- for (i = 0; i < len; i++)
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
-#else
- memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len);
-#endif
-}
-
-static inline void MemWriteDOC(void __iomem * docptr, unsigned char *buf, int len)
-{
-#ifndef USE_MEMCPY
- int i;
- for (i = 0; i < len; i++)
- WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
-#else
- memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
-#endif
-}
-
-/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
-static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
-{
- int mfr, id, i, j;
- volatile char dummy;
- void __iomem * docptr = doc->virtadr;
-
- /* Page in the required floor/chip */
- DoC_SelectFloor(docptr, floor);
- DoC_SelectChip(docptr, chip);
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- /* Read the NAND chip ID: 1. Send ReadID command */
- DoC_Command(docptr, NAND_CMD_READID, 0);
-
- /* Read the NAND chip ID: 2. Send address byte zero */
- DoC_Address(doc, 1, 0x00, 0, 0x00);
-
- WriteDOC(0, docptr, Mplus_FlashControl);
- DoC_WaitReady(docptr);
-
- /* Read the manufacturer and device id codes of the flash device through
- CDSN IO register see Software Requirement 11.4 item 5.*/
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
-
- mfr = ReadDOC(docptr, Mil_CDSN_IO);
- if (doc->interleave)
- dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
-
- id = ReadDOC(docptr, Mil_CDSN_IO);
- if (doc->interleave)
- dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
-
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- /* No response - return failure */
- if (mfr == 0xff || mfr == 0)
- return 0;
-
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (id == nand_flash_ids[i].id) {
- /* Try to identify manufacturer */
- for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
- if (nand_manuf_ids[j].id == mfr)
- break;
- }
- printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, "
- "Chip ID: %2.2X (%s:%s)\n", mfr, id,
- nand_manuf_ids[j].name, nand_flash_ids[i].name);
- doc->mfr = mfr;
- doc->id = id;
- doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
- doc->erasesize = nand_flash_ids[i].erasesize << doc->interleave;
- break;
- }
- }
-
- if (nand_flash_ids[i].name == NULL)
- return 0;
- return 1;
-}
-
-/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-static void DoC_ScanChips(struct DiskOnChip *this)
-{
- int floor, chip;
- int numchips[MAX_FLOORS_MPLUS];
- int ret;
-
- this->numchips = 0;
- this->mfr = 0;
- this->id = 0;
-
- /* Work out the intended interleave setting */
- this->interleave = 0;
- if (this->ChipID == DOC_ChipID_DocMilPlus32)
- this->interleave = 1;
-
- /* Check the ASIC agrees */
- if ( (this->interleave << 2) !=
- (ReadDOC(this->virtadr, Mplus_Configuration) & 4)) {
- u_char conf = ReadDOC(this->virtadr, Mplus_Configuration);
- printk(KERN_NOTICE "Setting DiskOnChip Millennium Plus interleave to %s\n",
- this->interleave?"on (16-bit)":"off (8-bit)");
- conf ^= 4;
- WriteDOC(conf, this->virtadr, Mplus_Configuration);
- }
-
- /* For each floor, find the number of valid chips it contains */
- for (floor = 0,ret = 1; floor < MAX_FLOORS_MPLUS; floor++) {
- numchips[floor] = 0;
- for (chip = 0; chip < MAX_CHIPS_MPLUS && ret != 0; chip++) {
- ret = DoC_IdentChip(this, floor, chip);
- if (ret) {
- numchips[floor]++;
- this->numchips++;
- }
- }
- }
- /* If there are none at all that we recognise, bail */
- if (!this->numchips) {
- printk("No flash chips recognised.\n");
- return;
- }
-
- /* Allocate an array to hold the information for each chip */
- this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
- if (!this->chips){
- printk("MTD: No memory for allocating chip info structures\n");
- return;
- }
-
- /* Fill out the chip array with {floor, chipno} for each
- * detected chip in the device. */
- for (floor = 0, ret = 0; floor < MAX_FLOORS_MPLUS; floor++) {
- for (chip = 0 ; chip < numchips[floor] ; chip++) {
- this->chips[ret].floor = floor;
- this->chips[ret].chip = chip;
- this->chips[ret].curadr = 0;
- this->chips[ret].curmode = 0x50;
- ret++;
- }
- }
-
- /* Calculate and print the total size of the device */
- this->totlen = this->numchips * (1 << this->chipshift);
- printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
- this->numchips ,this->totlen >> 20);
-}
-
-static int DoCMilPlus_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
-{
- int tmp1, tmp2, retval;
-
- if (doc1->physadr == doc2->physadr)
- return 1;
-
- /* Use the alias resolution register which was set aside for this
-	 * purpose. If its value is the same on both chips, they might
-	 * be the same chip, and we write to one and check for a change in
-	 * the other. It's unclear if this register is usable in the
- * DoC 2000 (it's in the Millennium docs), but it seems to work. */
- tmp1 = ReadDOC(doc1->virtadr, Mplus_AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution);
- if (tmp1 != tmp2)
- return 0;
-
- WriteDOC((tmp1+1) % 0xff, doc1->virtadr, Mplus_AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution);
- if (tmp2 == (tmp1+1) % 0xff)
- retval = 1;
- else
- retval = 0;
-
- /* Restore register contents. May not be necessary, but do it just to
- * be safe. */
- WriteDOC(tmp1, doc1->virtadr, Mplus_AliasResolution);
-
- return retval;
-}
-
-/* This routine is found from the docprobe code by symbol_get(),
- * which will bump the use count of this module. */
-void DoCMilPlus_init(struct mtd_info *mtd)
-{
- struct DiskOnChip *this = mtd->priv;
- struct DiskOnChip *old = NULL;
-
- /* We must avoid being called twice for the same device. */
- if (docmilpluslist)
- old = docmilpluslist->priv;
-
- while (old) {
- if (DoCMilPlus_is_alias(this, old)) {
- printk(KERN_NOTICE "Ignoring DiskOnChip Millennium "
- "Plus at 0x%lX - already configured\n",
- this->physadr);
- iounmap(this->virtadr);
- kfree(mtd);
- return;
- }
- if (old->nextdoc)
- old = old->nextdoc->priv;
- else
- old = NULL;
- }
-
- mtd->name = "DiskOnChip Millennium Plus";
- printk(KERN_NOTICE "DiskOnChip Millennium Plus found at "
- "address 0x%lX\n", this->physadr);
-
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
- mtd->writebufsize = mtd->writesize = 512;
- mtd->oobsize = 16;
- mtd->ecc_strength = 2;
- mtd->owner = THIS_MODULE;
- mtd->_erase = doc_erase;
- mtd->_read = doc_read;
- mtd->_write = doc_write;
- mtd->_read_oob = doc_read_oob;
- mtd->_write_oob = doc_write_oob;
- this->curfloor = -1;
- this->curchip = -1;
-
- /* Ident all the chips present. */
- DoC_ScanChips(this);
-
- if (!this->totlen) {
- kfree(mtd);
- iounmap(this->virtadr);
- } else {
- this->nextdoc = docmilpluslist;
- docmilpluslist = mtd;
- mtd->size = this->totlen;
- mtd->erasesize = this->erasesize;
- mtd_device_register(mtd, NULL, 0);
- return;
- }
-}
-EXPORT_SYMBOL_GPL(DoCMilPlus_init);
-
-#if 0
-static int doc_dumpblk(struct mtd_info *mtd, loff_t from)
-{
- int i;
- loff_t fofs;
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[from >> (this->chipshift)];
- unsigned char *bp, buf[1056];
- char c[32];
-
- from &= ~0x3ff;
-
- /* Don't allow read past end of device */
- if (from >= this->totlen)
- return -EINVAL;
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- fofs = from;
- DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0);
- DoC_Address(this, 3, fofs, 0, 0x00);
- WriteDOC(0, docptr, Mplus_FlashControl);
- DoC_WaitReady(docptr);
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
-
- /* Read the data via the internal pipeline through CDSN IO
- register, see Pipelined Read Operations 11.3 */
- MemReadDOC(docptr, buf, 1054);
- buf[1054] = ReadDOC(docptr, Mplus_LastDataRead);
- buf[1055] = ReadDOC(docptr, Mplus_LastDataRead);
-
- memset(&c[0], 0, sizeof(c));
- printk("DUMP OFFSET=%x:\n", (int)from);
-
- for (i = 0, bp = &buf[0]; (i < 1056); i++) {
- if ((i % 16) == 0)
- printk("%08x: ", i);
- printk(" %02x", *bp);
- c[(i & 0xf)] = ((*bp >= 0x20) && (*bp <= 0x7f)) ? *bp : '.';
- bp++;
- if (((i + 1) % 16) == 0)
- printk(" %s\n", c);
- }
- printk("\n");
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- return 0;
-}
-#endif
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- int ret, i;
- volatile char dummy;
- loff_t fofs;
- unsigned char syndrome[6], eccbuf[6];
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[from >> (this->chipshift)];
-
- /* Don't allow a single read to cross a 512-byte block boundary */
- if (from + len > ((from | 0x1ff) + 1))
- len = ((from | 0x1ff) + 1) - from;
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- fofs = from;
- DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0);
- DoC_Address(this, 3, fofs, 0, 0x00);
- WriteDOC(0, docptr, Mplus_FlashControl);
- DoC_WaitReady(docptr);
-
- /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
- WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
-
- /* Let the caller know we completed it */
- *retlen = len;
- ret = 0;
-
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
-
- /* Read the data via the internal pipeline through CDSN IO
- register, see Pipelined Read Operations 11.3 */
- MemReadDOC(docptr, buf, len);
-
- /* Read the ECC data following raw data */
- MemReadDOC(docptr, eccbuf, 4);
- eccbuf[4] = ReadDOC(docptr, Mplus_LastDataRead);
- eccbuf[5] = ReadDOC(docptr, Mplus_LastDataRead);
-
- /* Flush the pipeline */
- dummy = ReadDOC(docptr, Mplus_ECCConf);
- dummy = ReadDOC(docptr, Mplus_ECCConf);
-
- /* Check the ECC Status */
- if (ReadDOC(docptr, Mplus_ECCConf) & 0x80) {
- int nb_errors;
- /* There was an ECC error */
-#ifdef ECC_DEBUG
- printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
-#endif
-		/* Read the ECC syndrome through the DiskOnChip ECC logic.
-		   These syndromes will be all ZERO when there is no error */
- for (i = 0; i < 6; i++)
- syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
-
- nb_errors = doc_decode_ecc(buf, syndrome);
-#ifdef ECC_DEBUG
- printk("ECC Errors corrected: %x\n", nb_errors);
-#endif
- if (nb_errors < 0) {
- /* We return error, but have actually done the
- read. Not that this can be told to user-space, via
- sys_read(), but at least MTD-aware stuff can know
- about it by checking *retlen */
-#ifdef ECC_DEBUG
- printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n",
- __FILE__, __LINE__, (int)from);
- printk(" syndrome= %*phC\n", 6, syndrome);
- printk(" eccbuf= %*phC\n", 6, eccbuf);
-#endif
- ret = -EIO;
- }
- }
-
-#ifdef PSYCHO_DEBUG
- printk("ECC DATA at %lx: %*ph\n", (long)from, 6, eccbuf);
-#endif
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf);
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- return ret;
-}
-
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- int i, before, ret = 0;
- loff_t fto;
- volatile char dummy;
- char eccbuf[6];
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[to >> (this->chipshift)];
-
- /* Don't allow writes which aren't exactly one block (512 bytes) */
- if ((to & 0x1ff) || (len != 0x200))
- return -EINVAL;
-
- /* Determine position of OOB flags, before or after data */
- before = (this->interleave && (to & 0x200));
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- /* Set device to appropriate plane of flash */
- fto = to;
- WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd);
-
- /* On interleaved devices the flags for 2nd half 512 are before data */
- if (before)
- fto -= 2;
-
-	/* issue the Serial Data In command to initiate the Page Program process */
- DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
- DoC_Address(this, 3, fto, 0x00, 0x00);
-
- /* Disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-
- if (before) {
- /* Write the block status BLOCK_USED (0x5555) */
- WriteDOC(0x55, docptr, Mil_CDSN_IO);
- WriteDOC(0x55, docptr, Mil_CDSN_IO);
- }
-
- /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
- WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
-
- MemWriteDOC(docptr, (unsigned char *) buf, len);
-
- /* Write ECC data to flash, the ECC info is generated by
- the DiskOnChip ECC logic see Reed-Solomon EDC/ECC 11.1 */
- DoC_Delay(docptr, 3);
-
- /* Read the ECC data through the DiskOnChip ECC logic */
- for (i = 0; i < 6; i++)
- eccbuf[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
-
- /* Write the ECC data to flash */
- MemWriteDOC(docptr, eccbuf, 6);
-
- if (!before) {
- /* Write the block status BLOCK_USED (0x5555) */
- WriteDOC(0x55, docptr, Mil_CDSN_IO+6);
- WriteDOC(0x55, docptr, Mil_CDSN_IO+7);
- }
-
-#ifdef PSYCHO_DEBUG
- printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
-
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-
- /* Commit the Page Program command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.*/
- DoC_Command(docptr, NAND_CMD_STATUS, 0);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- DoC_Delay(docptr, 2);
- if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
- printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to);
- /* Error in programming
- FIXME: implement Bad Block Replacement (in nftl.c ??) */
- ret = -EIO;
- }
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- /* Let the caller know we completed it */
- *retlen = len;
-
- return ret;
-}
-
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- loff_t fofs, base;
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- size_t i, size, got, want;
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
- ofs += ops->ooboffs;
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
- DoC_WaitReady(docptr);
-
- /* Maximum of 16 bytes in the OOB region, so limit read to that */
- if (len > 16)
- len = 16;
- got = 0;
- want = len;
-
- for (i = 0; ((i < 3) && (want > 0)); i++) {
- /* Figure out which region we are accessing... */
- fofs = ofs;
- base = ofs & 0xf;
- if (!this->interleave) {
- DoC_Command(docptr, NAND_CMD_READOOB, 0);
- size = 16 - base;
- } else if (base < 6) {
- DoC_Command(docptr, DoC_GetECCOffset(mtd, &fofs), 0);
- size = 6 - base;
- } else if (base < 8) {
- DoC_Command(docptr, DoC_GetFlagsOffset(mtd, &fofs), 0);
- size = 8 - base;
- } else {
- DoC_Command(docptr, DoC_GetHdrOffset(mtd, &fofs), 0);
- size = 16 - base;
- }
- if (size > want)
- size = want;
-
- /* Issue read command */
- DoC_Address(this, 3, fofs, 0, 0x00);
- WriteDOC(0, docptr, Mplus_FlashControl);
- DoC_WaitReady(docptr);
-
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
- MemReadDOC(docptr, &buf[got], size - 2);
- buf[got + size - 2] = ReadDOC(docptr, Mplus_LastDataRead);
- buf[got + size - 1] = ReadDOC(docptr, Mplus_LastDataRead);
-
- ofs += size;
- got += size;
- want -= size;
- }
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- ops->retlen = len;
- return 0;
-}
-
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- volatile char dummy;
- loff_t fofs, base;
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- size_t i, size, got, want;
- int ret = 0;
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
- ofs += ops->ooboffs;
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
-
-
- /* Maximum of 16 bytes in the OOB region, so limit write to that */
- if (len > 16)
- len = 16;
- got = 0;
- want = len;
-
- for (i = 0; ((i < 3) && (want > 0)); i++) {
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- /* Figure out which region we are accessing... */
- fofs = ofs;
- base = ofs & 0x0f;
- if (!this->interleave) {
- WriteDOC(NAND_CMD_READOOB, docptr, Mplus_FlashCmd);
- size = 16 - base;
- } else if (base < 6) {
- WriteDOC(DoC_GetECCOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
- size = 6 - base;
- } else if (base < 8) {
- WriteDOC(DoC_GetFlagsOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
- size = 8 - base;
- } else {
- WriteDOC(DoC_GetHdrOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
- size = 16 - base;
- }
- if (size > want)
- size = want;
-
-		/* Issue the Serial Data In command to initiate the Page Program process */
- DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
- DoC_Address(this, 3, fofs, 0, 0x00);
-
- /* Disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-
- /* Write the data via the internal pipeline through CDSN IO
- register, see Pipelined Write Operations 11.2 */
- MemWriteDOC(docptr, (unsigned char *) &buf[got], size);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-
- /* Commit the Page Program command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.*/
- DoC_Command(docptr, NAND_CMD_STATUS, 0x00);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- DoC_Delay(docptr, 2);
- if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
- printk("MTD: Error 0x%x programming oob at 0x%x\n",
- dummy, (int)ofs);
- /* FIXME: implement Bad Block Replacement */
- ops->retlen = 0;
- ret = -EIO;
- }
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
- ofs += size;
- got += size;
- want -= size;
- }
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- ops->retlen = len;
- return ret;
-}
-
-int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- volatile char dummy;
- struct DiskOnChip *this = mtd->priv;
- __u32 ofs = instr->addr;
- __u32 len = instr->len;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
-
- DoC_CheckASIC(docptr);
-
- if (len != mtd->erasesize)
-		printk(KERN_WARNING "MTD: Erase not right size (%x != %x)\n",
- len, mtd->erasesize);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- instr->state = MTD_ERASE_PENDING;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
-
- DoC_Command(docptr, NAND_CMD_RESET, 0x00);
- DoC_WaitReady(docptr);
-
- DoC_Command(docptr, NAND_CMD_ERASE1, 0);
- DoC_Address(this, 2, ofs, 0, 0x00);
- DoC_Command(docptr, NAND_CMD_ERASE2, 0);
- DoC_WaitReady(docptr);
- instr->state = MTD_ERASING;
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5. */
- DoC_Command(docptr, NAND_CMD_STATUS, 0);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
- printk("MTD: Error 0x%x erasing at 0x%x\n", dummy, ofs);
- /* FIXME: implement Bad Block Replacement (in nftl.c ??) */
- instr->state = MTD_ERASE_FAILED;
- } else {
- instr->state = MTD_ERASE_DONE;
- }
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- mtd_erase_callback(instr);
-
- return 0;
-}
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static void __exit cleanup_doc2001plus(void)
-{
- struct mtd_info *mtd;
- struct DiskOnChip *this;
-
- while ((mtd=docmilpluslist)) {
- this = mtd->priv;
- docmilpluslist = this->nextdoc;
-
- mtd_device_unregister(mtd);
-
- iounmap(this->virtadr);
- kfree(this->chips);
- kfree(mtd);
- }
-}
-
-module_exit(cleanup_doc2001plus);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com> et al.");
-MODULE_DESCRIPTION("Driver for DiskOnChip Millennium Plus");
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
deleted file mode 100644
index 4a1c39b..0000000
--- a/drivers/mtd/devices/docecc.c
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * ECC algorithm for M-systems disk on chip. We use the excellent Reed-
- * Solomon code of Phil Karn (karn@ka9q.ampr.org) available under the
- * GNU GPL License. The rest is simply to convert the disk on chip
- * syndrome into a standard syndrome.
- *
- * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
- * Copyright (C) 2000 Netgem S.A.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/doc2000.h>
-
-#define DEBUG_ECC 0
-/* need to undef it (from asm/termbits.h) */
-#undef B0
-
-#define MM 10 /* Symbol size in bits */
-#define KK (1023-4) /* Number of data symbols per block */
-#define B0 510 /* First root of generator polynomial, alpha form */
-#define PRIM 1 /* power of alpha used to generate roots of generator poly */
-#define NN ((1 << MM) - 1)
-
-typedef unsigned short dtype;
-
-/* 1+x^3+x^10 */
-static const int Pp[MM+1] = { 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1 };
-
-/* This defines the type used to store an element of the Galois Field
- * used by the code. Make sure this is something larger than a char if
- * anything larger than GF(256) is used.
- *
- * Note: unsigned char will work up to GF(256) but int seems to run
- * faster on the Pentium.
- */
-typedef int gf;
-
-/* No legal value in index form represents zero, so
- * we need a special value for this purpose
- */
-#define A0 (NN)
-
-/* Compute x % NN, where NN is 2**MM - 1,
- * without a slow divide
- */
-static inline gf
-modnn(int x)
-{
- while (x >= NN) {
- x -= NN;
- x = (x >> MM) + (x & NN);
- }
- return x;
-}
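
Because 2^MM is congruent to 1 modulo NN, folding the bits above MM back onto the low bits preserves the value mod NN, so the loop converges to x % NN without a division. A small stand-alone check (sketch only):

#include <stdio.h>

#define MM 10
#define NN ((1 << MM) - 1)

/* Same folding reduction as modnn() above. */
static int modnn_sketch(int x)
{
	while (x >= NN) {
		x -= NN;
		x = (x >> MM) + (x & NN);
	}
	return x;
}

int main(void)
{
	int x;

	/* spot-check against the ordinary modulo operator */
	for (x = 0; x < 4 * NN; x++)
		if (modnn_sketch(x) != x % NN)
			printf("mismatch at %d\n", x);
	return 0;
}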
-
-#define CLEAR(a,n) {\
-int ci;\
-for(ci=(n)-1;ci >=0;ci--)\
-(a)[ci] = 0;\
-}
-
-#define COPY(a,b,n) {\
-int ci;\
-for(ci=(n)-1;ci >=0;ci--)\
-(a)[ci] = (b)[ci];\
-}
-
-#define COPYDOWN(a,b,n) {\
-int ci;\
-for(ci=(n)-1;ci >=0;ci--)\
-(a)[ci] = (b)[ci];\
-}
-
-#define Ldec 1
-
-/* generate GF(2**m) from the irreducible polynomial p(X) in Pp[0]..Pp[m]
- lookup tables: index->polynomial form alpha_to[] contains j=alpha**i;
- polynomial form -> index form index_of[j=alpha**i] = i
- alpha=2 is the primitive element of GF(2**m)
- HARI's COMMENT: (4/13/94) alpha_to[] can be used as follows:
- Let @ represent the primitive element commonly called "alpha" that
- is the root of the primitive polynomial p(x). Then in GF(2^m), for any
- 0 <= i <= 2^m-2,
- @^i = a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1)
- where the binary vector (a(0),a(1),a(2),...,a(m-1)) is the representation
- of the integer "alpha_to[i]" with a(0) being the LSB and a(m-1) the MSB. Thus for
- example the polynomial representation of @^5 would be given by the binary
- representation of the integer "alpha_to[5]".
- Similarly, index_of[] can be used as follows:
- As above, let @ represent the primitive element of GF(2^m) that is
- the root of the primitive polynomial p(x). In order to find the power
- of @ (alpha) that has the polynomial representation
- a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1)
- we consider the integer "i" whose binary representation with a(0) being LSB
- and a(m-1) MSB is (a(0),a(1),...,a(m-1)) and locate the entry
- "index_of[i]". Now, @^index_of[i] is that element whose polynomial
- representation is (a(0),a(1),a(2),...,a(m-1)).
- NOTE:
- The element alpha_to[2^m-1] = 0 always signifying that the
- representation of "@^infinity" = 0 is (0,0,0,...,0).
- Similarly, the element index_of[0] = A0 always signifying
- that the power of alpha which has the polynomial representation
- (0,0,...,0) is "infinity".
-
-*/
-
-static void
-generate_gf(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1])
-{
- register int i, mask;
-
- mask = 1;
- Alpha_to[MM] = 0;
- for (i = 0; i < MM; i++) {
- Alpha_to[i] = mask;
- Index_of[Alpha_to[i]] = i;
- /* If Pp[i] == 1 then, term @^i occurs in poly-repr of @^MM */
- if (Pp[i] != 0)
- Alpha_to[MM] ^= mask; /* Bit-wise EXOR operation */
- mask <<= 1; /* single left-shift */
- }
- Index_of[Alpha_to[MM]] = MM;
- /*
- * Have obtained poly-repr of @^MM. Poly-repr of @^(i+1) is given by
- * poly-repr of @^i shifted left one-bit and accounting for any @^MM
- * term that may occur when poly-repr of @^i is shifted.
- */
- mask >>= 1;
- for (i = MM + 1; i < NN; i++) {
- if (Alpha_to[i - 1] >= mask)
- Alpha_to[i] = Alpha_to[MM] ^ ((Alpha_to[i - 1] ^ mask) << 1);
- else
- Alpha_to[i] = Alpha_to[i - 1] << 1;
- Index_of[Alpha_to[i]] = i;
- }
- Index_of[0] = A0;
- Alpha_to[NN] = 0;
-}
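
With alpha_to[] (index to polynomial form) and index_of[] (polynomial to index form) filled in, multiplication in GF(2^10) reduces to adding logs modulo NN. The following is a hypothetical helper, not present in docecc.c, assuming the gf/dtype types, NN and modnn() defined above:

/* Multiply two field elements via the log/antilog tables built by
 * generate_gf(); zero has no log (index_of[0] == A0), so special-case it. */
static gf gf_mul(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1], gf a, gf b)
{
	if (a == 0 || b == 0)
		return 0;
	return Alpha_to[modnn(Index_of[a] + Index_of[b])];
}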
-
-/*
- * Performs ERRORS+ERASURES decoding of RS codes. bb[] is the content
- * of the feedback shift register after having processed the data and
- * the ECC.
- *
- * Return number of symbols corrected, or -1 if codeword is illegal
- * or uncorrectable. If eras_pos is non-null, the detected error locations
- * are written back. NOTE! This array must be at least NN-KK elements long.
- * The corrected data are written in eras_val[]. They must be XORed with the data
- * to retrieve the correct data: data[erase_pos[i]] ^= erase_val[i].
- *
- * First "no_eras" erasures are declared by the calling program. Then, the
- * maximum # of errors correctable is t_after_eras = floor((NN-KK-no_eras)/2).
- * If the number of channel errors is not greater than "t_after_eras" the
- * transmitted codeword will be recovered. Details of algorithm can be found
- * in R. Blahut's "Theory ... of Error-Correcting Codes".
-
- * Warning: the eras_pos[] array must not contain duplicate entries; decoder failure
- * will result. The decoder *could* check for this condition, but it would involve
- * extra time on every decoding operation.
- * */
-static int
-eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
- gf bb[NN - KK + 1], gf eras_val[NN-KK], int eras_pos[NN-KK],
- int no_eras)
-{
- int deg_lambda, el, deg_omega;
- int i, j, r,k;
- gf u,q,tmp,num1,num2,den,discr_r;
- gf lambda[NN-KK + 1], s[NN-KK + 1]; /* Err+Eras Locator poly
- * and syndrome poly */
- gf b[NN-KK + 1], t[NN-KK + 1], omega[NN-KK + 1];
- gf root[NN-KK], reg[NN-KK + 1], loc[NN-KK];
- int syn_error, count;
-
- syn_error = 0;
- for(i=0;i<NN-KK;i++)
- syn_error |= bb[i];
-
- if (!syn_error) {
- /* if remainder is zero, data[] is a codeword and there are no
- * errors to correct. So return data[] unmodified
- */
- count = 0;
- goto finish;
- }
-
- for(i=1;i<=NN-KK;i++){
- s[i] = bb[0];
- }
- for(j=1;j<NN-KK;j++){
- if(bb[j] == 0)
- continue;
- tmp = Index_of[bb[j]];
-
- for(i=1;i<=NN-KK;i++)
- s[i] ^= Alpha_to[modnn(tmp + (B0+i-1)*PRIM*j)];
- }
-
- /* undo the feedback register implicit multiplication and convert
- syndromes to index form */
-
- for(i=1;i<=NN-KK;i++) {
- tmp = Index_of[s[i]];
- if (tmp != A0)
- tmp = modnn(tmp + 2 * KK * (B0+i-1)*PRIM);
- s[i] = tmp;
- }
-
- CLEAR(&lambda[1],NN-KK);
- lambda[0] = 1;
-
- if (no_eras > 0) {
- /* Init lambda to be the erasure locator polynomial */
- lambda[1] = Alpha_to[modnn(PRIM * eras_pos[0])];
- for (i = 1; i < no_eras; i++) {
- u = modnn(PRIM*eras_pos[i]);
- for (j = i+1; j > 0; j--) {
- tmp = Index_of[lambda[j - 1]];
- if(tmp != A0)
- lambda[j] ^= Alpha_to[modnn(u + tmp)];
- }
- }
-#if DEBUG_ECC >= 1
- /* Test code that verifies the erasure locator polynomial just constructed
- Needed only for decoder debugging. */
-
- /* find roots of the erasure location polynomial */
- for(i=1;i<=no_eras;i++)
- reg[i] = Index_of[lambda[i]];
- count = 0;
- for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) {
- q = 1;
- for (j = 1; j <= no_eras; j++)
- if (reg[j] != A0) {
- reg[j] = modnn(reg[j] + j);
- q ^= Alpha_to[reg[j]];
- }
- if (q != 0)
- continue;
- /* store root and error location number indices */
- root[count] = i;
- loc[count] = k;
- count++;
- }
- if (count != no_eras) {
- printf("\n lambda(x) is WRONG\n");
- count = -1;
- goto finish;
- }
-#if DEBUG_ECC >= 2
- printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n");
- for (i = 0; i < count; i++)
- printf("%d ", loc[i]);
- printf("\n");
-#endif
-#endif
- }
- for(i=0;i<NN-KK+1;i++)
- b[i] = Index_of[lambda[i]];
-
- /*
- * Begin Berlekamp-Massey algorithm to determine error+erasure
- * locator polynomial
- */
- r = no_eras;
- el = no_eras;
- while (++r <= NN-KK) { /* r is the step number */
- /* Compute discrepancy at the r-th step in poly-form */
- discr_r = 0;
- for (i = 0; i < r; i++){
- if ((lambda[i] != 0) && (s[r - i] != A0)) {
- discr_r ^= Alpha_to[modnn(Index_of[lambda[i]] + s[r - i])];
- }
- }
- discr_r = Index_of[discr_r]; /* Index form */
- if (discr_r == A0) {
- /* 2 lines below: B(x) <-- x*B(x) */
- COPYDOWN(&b[1],b,NN-KK);
- b[0] = A0;
- } else {
- /* 7 lines below: T(x) <-- lambda(x) - discr_r*x*b(x) */
- t[0] = lambda[0];
- for (i = 0 ; i < NN-KK; i++) {
- if(b[i] != A0)
- t[i+1] = lambda[i+1] ^ Alpha_to[modnn(discr_r + b[i])];
- else
- t[i+1] = lambda[i+1];
- }
- if (2 * el <= r + no_eras - 1) {
- el = r + no_eras - el;
- /*
- * 2 lines below: B(x) <-- inv(discr_r) *
- * lambda(x)
- */
- for (i = 0; i <= NN-KK; i++)
- b[i] = (lambda[i] == 0) ? A0 : modnn(Index_of[lambda[i]] - discr_r + NN);
- } else {
- /* 2 lines below: B(x) <-- x*B(x) */
- COPYDOWN(&b[1],b,NN-KK);
- b[0] = A0;
- }
- COPY(lambda,t,NN-KK+1);
- }
- }
-
- /* Convert lambda to index form and compute deg(lambda(x)) */
- deg_lambda = 0;
- for(i=0;i<NN-KK+1;i++){
- lambda[i] = Index_of[lambda[i]];
- if(lambda[i] != A0)
- deg_lambda = i;
- }
- /*
- * Find roots of the error+erasure locator polynomial by Chien
- * Search
- */
- COPY(&reg[1],&lambda[1],NN-KK);
- count = 0; /* Number of roots of lambda(x) */
- for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) {
- q = 1;
- for (j = deg_lambda; j > 0; j--){
- if (reg[j] != A0) {
- reg[j] = modnn(reg[j] + j);
- q ^= Alpha_to[reg[j]];
- }
- }
- if (q != 0)
- continue;
- /* store root (index-form) and error location number */
- root[count] = i;
- loc[count] = k;
- /* If we've already found max possible roots,
- * abort the search to save time
- */
- if(++count == deg_lambda)
- break;
- }
- if (deg_lambda != count) {
- /*
- * deg(lambda) unequal to number of roots => uncorrectable
- * error detected
- */
- count = -1;
- goto finish;
- }
- /*
- * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
- * x**(NN-KK)). in index form. Also find deg(omega).
- */
- deg_omega = 0;
- for (i = 0; i < NN-KK;i++){
- tmp = 0;
- j = (deg_lambda < i) ? deg_lambda : i;
- for(;j >= 0; j--){
- if ((s[i + 1 - j] != A0) && (lambda[j] != A0))
- tmp ^= Alpha_to[modnn(s[i + 1 - j] + lambda[j])];
- }
- if(tmp != 0)
- deg_omega = i;
- omega[i] = Index_of[tmp];
- }
- omega[NN-KK] = A0;
-
- /*
- * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
- * inv(X(l))**(B0-1) and den = lambda_pr(inv(X(l))) all in poly-form
- */
- for (j = count-1; j >=0; j--) {
- num1 = 0;
- for (i = deg_omega; i >= 0; i--) {
- if (omega[i] != A0)
- num1 ^= Alpha_to[modnn(omega[i] + i * root[j])];
- }
- num2 = Alpha_to[modnn(root[j] * (B0 - 1) + NN)];
- den = 0;
-
- /* lambda[i+1] for i even is the formal derivative lambda_pr of lambda[i] */
- for (i = min(deg_lambda,NN-KK-1) & ~1; i >= 0; i -=2) {
- if(lambda[i+1] != A0)
- den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
- }
- if (den == 0) {
-#if DEBUG_ECC >= 1
- printf("\n ERROR: denominator = 0\n");
-#endif
- /* Convert to dual- basis */
- count = -1;
- goto finish;
- }
- /* Apply error to data */
- if (num1 != 0) {
- eras_val[j] = Alpha_to[modnn(Index_of[num1] + Index_of[num2] + NN - Index_of[den])];
- } else {
- eras_val[j] = 0;
- }
- }
- finish:
- for(i=0;i<count;i++)
- eras_pos[i] = loc[i];
- return count;
-}
-
-/***************************************************************************/
-/* The DOC specific code begins here */
-
-#define SECTOR_SIZE 512
-/* The sector bytes are packed into NB_DATA MM bits words */
-#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / MM)
-
-/*
- * Correct the errors in 'sector[]' by using 'ecc1[]' which is the
- * content of the feedback shift register applied to the sector and
- * the ECC. Return the number of errors corrected (and correct them in
- * sector), or -1 if error
- */
-int doc_decode_ecc(unsigned char sector[SECTOR_SIZE], unsigned char ecc1[6])
-{
- int parity, i, nb_errors;
- gf bb[NN - KK + 1];
- gf error_val[NN-KK];
- int error_pos[NN-KK], pos, bitpos, index, val;
- dtype *Alpha_to, *Index_of;
-
- /* init log and exp tables here to save memory. However, it is slower */
- Alpha_to = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
- if (!Alpha_to)
- return -1;
-
- Index_of = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
- if (!Index_of) {
- kfree(Alpha_to);
- return -1;
- }
-
- generate_gf(Alpha_to, Index_of);
-
- parity = ecc1[1];
-
- bb[0] = (ecc1[4] & 0xff) | ((ecc1[5] & 0x03) << 8);
- bb[1] = ((ecc1[5] & 0xfc) >> 2) | ((ecc1[2] & 0x0f) << 6);
- bb[2] = ((ecc1[2] & 0xf0) >> 4) | ((ecc1[3] & 0x3f) << 4);
- bb[3] = ((ecc1[3] & 0xc0) >> 6) | ((ecc1[0] & 0xff) << 2);
-
- nb_errors = eras_dec_rs(Alpha_to, Index_of, bb,
- error_val, error_pos, 0);
- if (nb_errors <= 0)
- goto the_end;
-
- /* correct the errors */
- for(i=0;i<nb_errors;i++) {
- pos = error_pos[i];
- if (pos >= NB_DATA && pos < KK) {
- nb_errors = -1;
- goto the_end;
- }
- if (pos < NB_DATA) {
- /* extract bit position (MSB first) */
- pos = 10 * (NB_DATA - 1 - pos) - 6;
- /* now correct the following 10 bits. At most two bytes
- can be modified since pos is even */
- index = (pos >> 3) ^ 1;
- bitpos = pos & 7;
- if ((index >= 0 && index < SECTOR_SIZE) ||
- index == (SECTOR_SIZE + 1)) {
- val = error_val[i] >> (2 + bitpos);
- parity ^= val;
- if (index < SECTOR_SIZE)
- sector[index] ^= val;
- }
- index = ((pos >> 3) + 1) ^ 1;
- bitpos = (bitpos + 10) & 7;
- if (bitpos == 0)
- bitpos = 8;
- if ((index >= 0 && index < SECTOR_SIZE) ||
- index == (SECTOR_SIZE + 1)) {
- val = error_val[i] << (8 - bitpos);
- parity ^= val;
- if (index < SECTOR_SIZE)
- sector[index] ^= val;
- }
- }
- }
-
- /* use parity to test extra errors */
- if ((parity & 0xff) != 0)
- nb_errors = -1;
-
- the_end:
- kfree(Alpha_to);
- kfree(Index_of);
- return nb_errors;
-}
-
-EXPORT_SYMBOL_GPL(doc_decode_ecc);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Fabrice Bellard <fabrice.bellard@netgem.com>");
-MODULE_DESCRIPTION("ECC code for correcting errors detected by DiskOnChip 2000 and Millennium ECC hardware");
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 8510ccb..3e1b0a0 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -123,7 +123,7 @@ static inline void doc_flash_address(struct docg3 *docg3, u8 addr)
doc_writeb(docg3, addr, DOC_FLASHADDRESS);
}
-static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+static char const * const part_probes[] = { "cmdlinepart", "saftlpart", NULL };
static int doc_register_readb(struct docg3 *docg3, int reg)
{
@@ -2144,18 +2144,7 @@ static struct platform_driver g3_driver = {
.remove = __exit_p(docg3_release),
};
-static int __init docg3_init(void)
-{
- return platform_driver_probe(&g3_driver, docg3_probe);
-}
-module_init(docg3_init);
-
-
-static void __exit docg3_exit(void)
-{
- platform_driver_unregister(&g3_driver);
-}
-module_exit(docg3_exit);
+module_platform_driver_probe(g3_driver, docg3_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
deleted file mode 100644
index 88b3fd3..0000000
--- a/drivers/mtd/devices/docprobe.c
+++ /dev/null
@@ -1,325 +0,0 @@
-
-/* Linux driver for Disk-On-Chip devices */
-/* Probe routines common to all DoC devices */
-/* (C) 1999 Machine Vision Holdings, Inc. */
-/* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> */
-
-
-/* DOC_PASSIVE_PROBE:
- In order to ensure that the BIOS checksum is correct at boot time, and
- hence that the onboard BIOS extension gets executed, the DiskOnChip
- goes into reset mode when it is read sequentially: all registers
- return 0xff until the chip is woken up again by writing to the
- DOCControl register.
-
- Unfortunately, this means that the probe for the DiskOnChip is unsafe,
- because one of the first things it does is write to where it thinks
- the DOCControl register should be - which may well be shared memory
- for another device. I've had machines which lock up when this is
- attempted. Hence the possibility to do a passive probe, which will fail
- to detect a chip in reset mode, but is at least guaranteed not to lock
- the machine.
-
- If you have this problem, uncomment the following line:
-#define DOC_PASSIVE_PROBE
-*/
-
-
-/* DOC_SINGLE_DRIVER:
- Millennium driver has been merged into DOC2000 driver.
-
- The old Millennium-only driver has been retained just in case there
- are problems with the new code. If the combined driver doesn't work
- for you, you can try the old one by undefining DOC_SINGLE_DRIVER
- below and also enabling it in your configuration. If this fixes the
- problems, please send a report to the MTD mailing list at
- <linux-mtd@lists.infradead.org>.
-*/
-#define DOC_SINGLE_DRIVER
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-
-static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS;
-module_param(doc_config_location, ulong, 0);
-MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
-
-static unsigned long __initdata doc_locations[] = {
-#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
-#ifdef CONFIG_MTD_DOCPROBE_HIGH
- 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
- 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
- 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
- 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
- 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
-#else /* CONFIG_MTD_DOCPROBE_HIGH */
- 0xc8000, 0xca000, 0xcc000, 0xce000,
- 0xd0000, 0xd2000, 0xd4000, 0xd6000,
- 0xd8000, 0xda000, 0xdc000, 0xde000,
- 0xe0000, 0xe2000, 0xe4000, 0xe6000,
- 0xe8000, 0xea000, 0xec000, 0xee000,
-#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#endif
- 0xffffffff };
-
-/* doccheck: Probe a given memory window to see if there's a DiskOnChip present */
-
-static inline int __init doccheck(void __iomem *potential, unsigned long physadr)
-{
- void __iomem *window=potential;
- unsigned char tmp, tmpb, tmpc, ChipID;
-#ifndef DOC_PASSIVE_PROBE
- unsigned char tmp2;
-#endif
-
- /* Routine copied from the Linux DOC driver */
-
-#ifdef CONFIG_MTD_DOCPROBE_55AA
- /* Check for 0x55 0xAA signature at beginning of window,
- this is no longer true once we remove the IPL (for Millennium */
- if (ReadDOC(window, Sig1) != 0x55 || ReadDOC(window, Sig2) != 0xaa)
- return 0;
-#endif /* CONFIG_MTD_DOCPROBE_55AA */
-
-#ifndef DOC_PASSIVE_PROBE
- /* It's not possible to cleanly detect the DiskOnChip - the
- * bootup procedure will put the device into reset mode, and
- * it's not possible to talk to it without actually writing
- * to the DOCControl register. So we store the current contents
- * of the DOCControl register's location, in case we later decide
- * that it's not a DiskOnChip, and want to put it back how we
- * found it.
- */
- tmp2 = ReadDOC(window, DOCControl);
-
- /* Reset the DiskOnChip ASIC */
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
- window, DOCControl);
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
- window, DOCControl);
-
- /* Enable the DiskOnChip ASIC */
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
- window, DOCControl);
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
- window, DOCControl);
-#endif /* !DOC_PASSIVE_PROBE */
-
- /* We need to read the ChipID register four times. For some
- newer DiskOnChip 2000 units, the first three reads will
- return the DiskOnChip Millennium ident. Don't ask. */
- ChipID = ReadDOC(window, ChipID);
-
- switch (ChipID) {
- case DOC_ChipID_Doc2k:
- /* Check the TOGGLE bit in the ECC register */
- tmp = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
- tmpb = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
- tmpc = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
- if (tmp != tmpb && tmp == tmpc)
- return ChipID;
- break;
-
- case DOC_ChipID_DocMil:
- /* Check for the new 2000 with Millennium ASIC */
- ReadDOC(window, ChipID);
- ReadDOC(window, ChipID);
- if (ReadDOC(window, ChipID) != DOC_ChipID_DocMil)
- ChipID = DOC_ChipID_Doc2kTSOP;
-
- /* Check the TOGGLE bit in the ECC register */
- tmp = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
- tmpb = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
- tmpc = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
- if (tmp != tmpb && tmp == tmpc)
- return ChipID;
- break;
-
- case DOC_ChipID_DocMilPlus16:
- case DOC_ChipID_DocMilPlus32:
- case 0:
- /* Possible Millennium+, need to do more checks */
-#ifndef DOC_PASSIVE_PROBE
- /* Possibly release from power down mode */
- for (tmp = 0; (tmp < 4); tmp++)
- ReadDOC(window, Mplus_Power);
-
- /* Reset the DiskOnChip ASIC */
- tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
- DOC_MODE_BDECT;
- WriteDOC(tmp, window, Mplus_DOCControl);
- WriteDOC(~tmp, window, Mplus_CtrlConfirm);
-
- mdelay(1);
- /* Enable the DiskOnChip ASIC */
- tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
- DOC_MODE_BDECT;
- WriteDOC(tmp, window, Mplus_DOCControl);
- WriteDOC(~tmp, window, Mplus_CtrlConfirm);
- mdelay(1);
-#endif /* !DOC_PASSIVE_PROBE */
-
- ChipID = ReadDOC(window, ChipID);
-
- switch (ChipID) {
- case DOC_ChipID_DocMilPlus16:
- case DOC_ChipID_DocMilPlus32:
- /* Check the TOGGLE bit in the toggle register */
- tmp = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
- tmpb = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
- tmpc = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
- if (tmp != tmpb && tmp == tmpc)
- return ChipID;
- default:
- break;
- }
- /* FALL TRHU */
-
- default:
-
-#ifdef CONFIG_MTD_DOCPROBE_55AA
- printk(KERN_DEBUG "Possible DiskOnChip with unknown ChipID %2.2X found at 0x%lx\n",
- ChipID, physadr);
-#endif
-#ifndef DOC_PASSIVE_PROBE
- /* Put back the contents of the DOCControl register, in case it's not
- * actually a DiskOnChip.
- */
- WriteDOC(tmp2, window, DOCControl);
-#endif
- return 0;
- }
-
- printk(KERN_WARNING "DiskOnChip failed TOGGLE test, dropping.\n");
-
-#ifndef DOC_PASSIVE_PROBE
- /* Put back the contents of the DOCControl register: it's not a DiskOnChip */
- WriteDOC(tmp2, window, DOCControl);
-#endif
- return 0;
-}
-
-static int docfound;
-
-extern void DoC2k_init(struct mtd_info *);
-extern void DoCMil_init(struct mtd_info *);
-extern void DoCMilPlus_init(struct mtd_info *);
-
-static void __init DoC_Probe(unsigned long physadr)
-{
- void __iomem *docptr;
- struct DiskOnChip *this;
- struct mtd_info *mtd;
- int ChipID;
- char namebuf[15];
- char *name = namebuf;
- void (*initroutine)(struct mtd_info *) = NULL;
-
- docptr = ioremap(physadr, DOC_IOREMAP_LEN);
-
- if (!docptr)
- return;
-
- if ((ChipID = doccheck(docptr, physadr))) {
- if (ChipID == DOC_ChipID_Doc2kTSOP) {
- /* Remove this at your own peril. The hardware driver works but nothing prevents you from erasing bad blocks */
- printk(KERN_NOTICE "Refusing to drive DiskOnChip 2000 TSOP until Bad Block Table is correctly supported by INFTL\n");
- iounmap(docptr);
- return;
- }
- docfound = 1;
- mtd = kzalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
- if (!mtd) {
- printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n");
- iounmap(docptr);
- return;
- }
-
- this = (struct DiskOnChip *)(&mtd[1]);
- mtd->priv = this;
- this->virtadr = docptr;
- this->physadr = physadr;
- this->ChipID = ChipID;
- sprintf(namebuf, "with ChipID %2.2X", ChipID);
-
- switch(ChipID) {
- case DOC_ChipID_Doc2kTSOP:
- name="2000 TSOP";
- initroutine = symbol_request(DoC2k_init);
- break;
-
- case DOC_ChipID_Doc2k:
- name="2000";
- initroutine = symbol_request(DoC2k_init);
- break;
-
- case DOC_ChipID_DocMil:
- name="Millennium";
-#ifdef DOC_SINGLE_DRIVER
- initroutine = symbol_request(DoC2k_init);
-#else
- initroutine = symbol_request(DoCMil_init);
-#endif /* DOC_SINGLE_DRIVER */
- break;
-
- case DOC_ChipID_DocMilPlus16:
- case DOC_ChipID_DocMilPlus32:
- name="MillenniumPlus";
- initroutine = symbol_request(DoCMilPlus_init);
- break;
- }
-
- if (initroutine) {
- (*initroutine)(mtd);
- symbol_put_addr(initroutine);
- return;
- }
- printk(KERN_NOTICE "Cannot find driver for DiskOnChip %s at 0x%lX\n", name, physadr);
- kfree(mtd);
- }
- iounmap(docptr);
-}
-
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static int __init init_doc(void)
-{
- int i;
-
- if (doc_config_location) {
- printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
- DoC_Probe(doc_config_location);
- } else {
- for (i=0; (doc_locations[i] != 0xffffffff); i++) {
- DoC_Probe(doc_locations[i]);
- }
- }
- /* No banner message any more. Print a message if no DiskOnChip
- found, so the user knows we at least tried. */
- if (!docfound)
- printk(KERN_INFO "No recognised DiskOnChip devices found\n");
- return -EAGAIN;
-}
-
-module_init(init_doc);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("Probe code for DiskOnChip 2000 and Millennium devices");
-
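For reference, the removed probe told a live chip apart from RAM or a floating bus by the TOGGLE bit, which inverts on every read of the ECC/status register: three consecutive reads alternate on real hardware (t0 != t1, t0 == t2) and do not on anything else. A rough reconstruction of that check, assuming the ReadDOC() macro and register names from linux/mtd/doc2000.h:

/* Illustrative only -- the three-read toggle test the removed probe used */
static int doc_toggle_alive(void __iomem *window)
{
	unsigned char t0, t1, t2;

	t0 = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
	t1 = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
	t2 = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;

	return t0 != t1 && t0 == t2;	/* alternating => real chip */
}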
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
index 2ec5da9..dccef9f 100644
--- a/drivers/mtd/devices/elm.c
+++ b/drivers/mtd/devices/elm.c
@@ -81,14 +81,21 @@ static u32 elm_read_reg(struct elm_info *info, int offset)
* @dev: ELM device
* @bch_type: Type of BCH ecc
*/
-void elm_config(struct device *dev, enum bch_ecc bch_type)
+int elm_config(struct device *dev, enum bch_ecc bch_type)
{
u32 reg_val;
struct elm_info *info = dev_get_drvdata(dev);
+ if (!info) {
+ dev_err(dev, "Unable to configure elm - device not probed?\n");
+ return -ENODEV;
+ }
+
reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
info->bch_type = bch_type;
+
+ return 0;
}
EXPORT_SYMBOL(elm_config);
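Since elm_config() can now fail with -ENODEV when the ELM has not been probed, callers are expected to check and propagate the result instead of assuming the accelerator is present. A hedged sketch of a caller (the elm_dev variable and the BCH8_ECC choice are illustrative, not taken from this patch):

	int ret;

	ret = elm_config(elm_dev, BCH8_ECC);
	if (ret < 0)
		return ret;	/* ELM not available; fall back or abort setup */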
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 5b6b072..2f3d2a5 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -681,6 +681,7 @@ struct flash_info {
u16 flags;
#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
#define M25P_NO_ERASE 0x02 /* No erase command needed */
+#define SST_WRITE 0x04 /* use SST byte programming */
};
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
@@ -728,6 +729,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
/* Everspin */
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
@@ -740,7 +742,6 @@ static const struct spi_device_id m25p_ids[] = {
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
/* Macronix */
{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
@@ -753,8 +754,10 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, 0) },
/* Micron */
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
@@ -781,14 +784,15 @@ static const struct spi_device_id m25p_ids[] = {
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
- { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) },
- { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) },
- { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) },
- { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) },
- { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K) },
- { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K) },
- { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K) },
- { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K) },
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
+ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
/* ST Microelectronics -- newer production may have feature updates */
{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
@@ -838,6 +842,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
/* Catalyst / On Semiconductor -- non-JEDEC */
@@ -1000,7 +1005,7 @@ static int m25p_probe(struct spi_device *spi)
}
/* sst flash chips use AAI word program */
- if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
+ if (info->flags & SST_WRITE)
flash->mtd._write = sst_write;
else
flash->mtd._write = m25p80_write;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 945c9f76..28779b6 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -105,8 +105,6 @@ static const struct of_device_id dataflash_dt_ids[] = {
{ .compatible = "atmel,dataflash", },
{ /* sentinel */ }
};
-#else
-#define dataflash_dt_ids NULL
#endif
/* ......................................................................... */
@@ -914,7 +912,7 @@ static struct spi_driver dataflash_driver = {
.driver = {
.name = "mtd_dataflash",
.owner = THIS_MODULE,
- .of_match_table = dataflash_dt_ids,
+ .of_match_table = of_match_ptr(dataflash_dt_ids),
},
.probe = dataflash_probe,
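of_match_ptr() evaluates to its argument when CONFIG_OF is set and to NULL otherwise, which is why the hand-rolled "#define dataflash_dt_ids NULL" fallback can be dropped. The usual shape of the pattern, with placeholder names:

#ifdef CONFIG_OF
static const struct of_device_id foo_dt_ids[] = {
	{ .compatible = "vendor,foo", },
	{ /* sentinel */ }
};
#endif

static struct spi_driver foo_driver = {
	.driver = {
		.name		= "foo",
		.of_match_table	= of_match_ptr(foo_dt_ids),	/* NULL without CONFIG_OF */
	},
};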
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 3ed17c4..bed9d58 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -249,22 +249,6 @@ config MTD_LANTIQ
help
Support for NOR flash attached to the Lantiq SoC's External Bus Unit.
-config MTD_DILNETPC
- tristate "CFI Flash device mapped on DIL/Net PC"
- depends on X86 && MTD_CFI_INTELEXT && BROKEN
- help
- MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
- For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
- and <http://www.ssv-embedded.de/ssv/pc104/p170.htm>
-
-config MTD_DILNETPC_BOOTSIZE
- hex "Size of DIL/Net PC flash boot partition"
- depends on MTD_DILNETPC
- default "0x80000"
- help
- The amount of space taken up by the kernel or Etherboot
- on the DIL/Net PC flash chips.
-
config MTD_L440GX
tristate "BIOS flash chip on Intel L440GX boards"
depends on X86 && MTD_JEDECPROBE
@@ -274,42 +258,6 @@ config MTD_L440GX
BE VERY CAREFUL.
-config MTD_TQM8XXL
- tristate "CFI Flash device mapped on TQM8XXL"
- depends on MTD_CFI && TQM8xxL
- help
- The TQM8xxL PowerPC board has up to two banks of CFI-compliant
- chips, currently uses AMD one. This 'mapping' driver supports
- that arrangement, allowing the CFI probe and command set driver
- code to communicate with the chips on the TQM8xxL board. More at
- <http://www.denx.de/wiki/PPCEmbedded/>.
-
-config MTD_RPXLITE
- tristate "CFI Flash device mapped on RPX Lite or CLLF"
- depends on MTD_CFI && (RPXCLASSIC || RPXLITE)
- help
- The RPXLite PowerPC board has CFI-compliant chips mapped in
- a strange sparse mapping. This 'mapping' driver supports that
- arrangement, allowing the CFI probe and command set driver code
- to communicate with the chips on the RPXLite board. More at
- <http://www.embeddedplanet.com/>.
-
-config MTD_MBX860
- tristate "System flash on MBX860 board"
- depends on MTD_CFI && MBX
- help
- This enables access routines for the flash chips on the Motorola
- MBX860 board. If you have one of these boards and would like
- to use the flash chips on it, say 'Y'.
-
-config MTD_DBOX2
- tristate "CFI Flash device mapped on D-Box2"
- depends on DBOX2 && MTD_CFI_INTELSTD && MTD_CFI_INTELEXT && MTD_CFI_AMDSTD
- help
- This enables access routines for the flash chips on the Nokia/Sagem
- D-Box 2 board. If you have one of these boards and would like to use
- the flash chips on it, say 'Y'.
-
config MTD_CFI_FLAGADM
tristate "CFI Flash device mapping on FlagaDM"
depends on 8xx && MTD_CFI
@@ -349,15 +297,6 @@ config MTD_IXP4XX
IXDP425 and Coyote. If you have an IXP4xx based board and
would like to use the flash chips on it, say 'Y'.
-config MTD_IXP2000
- tristate "CFI Flash device mapped on Intel IXP2000 based systems"
- depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP2000
- help
- This enables MTD access to flash devices on platforms based
- on Intel's IXP2000 family of network processors. If you have an
- IXP2000 based board and would like to use the flash chips on it,
- say 'Y'.
-
config MTD_AUTCPU12
bool "NV-RAM mapping AUTCPU12 board"
depends on ARCH_AUTCPU12
@@ -372,13 +311,6 @@ config MTD_IMPA7
This enables access to the NOR Flash on the impA7 board of
implementa GmbH. If you have such a board, say 'Y' here.
-config MTD_H720X
- tristate "Hynix evaluation board mappings"
- depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 )
- help
- This enables access to the flash chips on the Hynix evaluation boards.
- If you have such a board, say 'Y'.
-
# This needs CFI or JEDEC, depending on the cards found.
config MTD_PCI
tristate "PCI MTD driver"
@@ -419,7 +351,7 @@ config MTD_BFIN_ASYNC
config MTD_GPIO_ADDR
tristate "GPIO-assisted Flash Chip Support"
- depends on GENERIC_GPIO || GPIOLIB
+ depends on GPIOLIB
depends on MTD_COMPLEX_MAPPINGS
help
Map driver which allows flashes to be partially physically addressed
@@ -433,15 +365,6 @@ config MTD_UCLINUX
help
Map driver to support image based filesystems for uClinux.
-config MTD_DMV182
- tristate "Map driver for Dy-4 SVME/DMV-182 board."
- depends on DMV182
- select MTD_MAP_BANK_WIDTH_32
- select MTD_CFI_I8
- select MTD_CFI_AMDSTD
- help
- Map driver for Dy-4 SVME/DMV-182 board.
-
config MTD_INTEL_VR_NOR
tristate "NOR flash on Intel Vermilion Range Expansion Bus CS0"
depends on PCI
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 4ded287..395a124 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -9,7 +9,6 @@ endif
# Chip mappings
obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
obj-$(CONFIG_MTD_DC21285) += dc21285.o
-obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
obj-$(CONFIG_MTD_L440GX) += l440gx.o
obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o
obj-$(CONFIG_MTD_ESB2ROM) += esb2rom.o
@@ -17,15 +16,12 @@ obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
-obj-$(CONFIG_MTD_MBX860) += mbx860.o
obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PISMO) += pismo.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
-obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
-obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
@@ -34,7 +30,6 @@ obj-$(CONFIG_MTD_TS5500) += ts5500_flash.o
obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o
obj-$(CONFIG_MTD_VMAX) += vmax301.o
obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
-obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
obj-$(CONFIG_MTD_PCI) += pci.o
obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
@@ -42,10 +37,7 @@ obj-$(CONFIG_MTD_IMPA7) += impa7.o
obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
obj-$(CONFIG_MTD_NETtel) += nettel.o
obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
-obj-$(CONFIG_MTD_H720X) += h720x-flash.o
obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
-obj-$(CONFIG_MTD_IXP2000) += ixp2000.o
-obj-$(CONFIG_MTD_DMV182) += dmv182.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index f833edf..319b04a 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -122,7 +122,8 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi
switch_back(state);
}
-static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", NULL };
static int bfin_flash_probe(struct platform_device *pdev)
{
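Several map drivers in this series get the same treatment as bfin-async-flash above: the partition-probe name arrays become const char * const, so both the pointer array and the strings it references are read-only and the table can be handed to the partition parsers without dropping const. Reduced to its essentials (placeholder names, and assuming the constified mtd_device_parse_register() prototype this series pairs with):

static const char * const part_probe_types[] = {
	"cmdlinepart", "RedBoot", NULL
};

	/* later, in probe(): */
	mtd_device_parse_register(mtd, part_probe_types, NULL, parts, nr_parts);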
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 586a1c7..0455166 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -308,8 +308,7 @@ static int ck804xrom_init_one(struct pci_dev *pdev,
out:
/* Free any left over map structures */
- if (map)
- kfree(map);
+ kfree(map);
/* See if I have any map structures */
if (list_empty(&window->maps)) {
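The ck804xrom cleanup relies on kfree() treating a NULL pointer as a no-op, so error and cleanup paths can free unconditionally:

	kfree(map);	/* safe even when map == NULL */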
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
deleted file mode 100644
index 85bdece..0000000
--- a/drivers/mtd/maps/dbox2-flash.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * D-Box 2 flash driver
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/errno.h>
-
-/* partition_info gives details on the logical partitions that the split the
- * single flash device into. If the size if zero we use up to the end of the
- * device. */
-static struct mtd_partition partition_info[]= {
- {
- .name = "BR bootloader",
- .size = 128 * 1024,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE
- },
- {
- .name = "FLFS (U-Boot)",
- .size = 128 * 1024,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = 0
- },
- {
- .name = "Root (SquashFS)",
- .size = 7040 * 1024,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = 0
- },
- {
- .name = "var (JFFS2)",
- .size = 896 * 1024,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = 0
- },
- {
- .name = "Flash without bootloader",
- .size = MTDPART_SIZ_FULL,
- .offset = 128 * 1024,
- .mask_flags = 0
- },
- {
- .name = "Complete Flash",
- .size = MTDPART_SIZ_FULL,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE
- }
-};
-
-#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
-
-#define WINDOW_ADDR 0x10000000
-#define WINDOW_SIZE 0x800000
-
-static struct mtd_info *mymtd;
-
-
-struct map_info dbox2_flash_map = {
- .name = "D-Box 2 flash memory",
- .size = WINDOW_SIZE,
- .bankwidth = 4,
- .phys = WINDOW_ADDR,
-};
-
-static int __init init_dbox2_flash(void)
-{
- printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR);
- dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
-
- if (!dbox2_flash_map.virt) {
- printk("Failed to ioremap\n");
- return -EIO;
- }
- simple_map_init(&dbox2_flash_map);
-
- // Probe for dual Intel 28F320 or dual AMD
- mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
- if (!mymtd) {
- // Probe for single Intel 28F640
- dbox2_flash_map.bankwidth = 2;
-
- mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
- }
-
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
-
- /* Create MTD devices for each partition. */
- mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
-
- return 0;
- }
-
- iounmap((void *)dbox2_flash_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_dbox2_flash(void)
-{
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (dbox2_flash_map.virt) {
- iounmap((void *)dbox2_flash_map.virt);
- dbox2_flash_map.virt = 0;
- }
-}
-
-module_init(init_dbox2_flash);
-module_exit(cleanup_dbox2_flash);
-
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>, Bastian Blank <waldi@tuxbox.org>, Alexander Wild <wild@te-elektronik.com>");
-MODULE_DESCRIPTION("MTD map driver for D-Box 2 board");
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 080f060..f8a7dd1 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -143,9 +143,8 @@ static struct map_info dc21285_map = {
.copy_from = dc21285_copy_from,
};
-
/* Partition stuff */
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int __init init_dc21285(void)
{
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
deleted file mode 100644
index 3e393f0..0000000
--- a/drivers/mtd/maps/dilnetpc.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/* dilnetpc.c -- MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP"
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- *
- * The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems
- * featuring the AMD Elan SC410 processor. There are two variants of this
- * board: DNP/1486 and ADNP/1486. The DNP version has 2 megs of flash
- * ROM (Intel 28F016S3) and 8 megs of DRAM, the ADNP version has 4 megs
- * flash and 16 megs of RAM.
- * For details, see http://www.ssv-embedded.de/ssv/pc104/p169.htm
- * and http://www.ssv-embedded.de/ssv/pc104/p170.htm
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/concat.h>
-
-#include <asm/io.h>
-
-/*
-** The DIL/NetPC keeps its BIOS in two distinct flash blocks.
-** Destroying any of these blocks transforms the DNPC into
-** a paperweight (albeit not a very useful one, considering
-** it only weighs a few grams).
-**
-** Therefore, the BIOS blocks must never be erased or written to
-** except by people who know exactly what they are doing (e.g.
-** to install a BIOS update). These partitions are marked read-only
-** by default, but can be made read/write by undefining
-** DNPC_BIOS_BLOCKS_WRITEPROTECTED:
-*/
-#define DNPC_BIOS_BLOCKS_WRITEPROTECTED
-
-/*
-** The ID string (in ROM) is checked to determine whether we
-** are running on a DNP/1486 or ADNP/1486
-*/
-#define BIOSID_BASE 0x000fe100
-
-#define ID_DNPC "DNP1486"
-#define ID_ADNP "ADNP1486"
-
-/*
-** Address where the flash should appear in CPU space
-*/
-#define FLASH_BASE 0x2000000
-
-/*
-** Chip Setup and Control (CSC) indexed register space
-*/
-#define CSC_INDEX 0x22
-#define CSC_DATA 0x23
-
-#define CSC_MMSWAR 0x30 /* MMS window C-F attributes register */
-#define CSC_MMSWDSR 0x31 /* MMS window C-F device select register */
-
-#define CSC_RBWR 0xa7 /* GPIO Read-Back/Write Register B */
-
-#define CSC_CR 0xd0 /* internal I/O device disable/Echo */
- /* Z-bus/configuration register */
-
-#define CSC_PCCMDCR 0xf1 /* PC card mode and DMA control register */
-
-
-/*
-** PC Card indexed register space:
-*/
-
-#define PCC_INDEX 0x3e0
-#define PCC_DATA 0x3e1
-
-#define PCC_AWER_B 0x46 /* Socket B Address Window enable register */
-#define PCC_MWSAR_1_Lo 0x58 /* memory window 1 start address low register */
-#define PCC_MWSAR_1_Hi 0x59 /* memory window 1 start address high register */
-#define PCC_MWEAR_1_Lo 0x5A /* memory window 1 stop address low register */
-#define PCC_MWEAR_1_Hi 0x5B /* memory window 1 stop address high register */
-#define PCC_MWAOR_1_Lo 0x5C /* memory window 1 address offset low register */
-#define PCC_MWAOR_1_Hi 0x5D /* memory window 1 address offset high register */
-
-
-/*
-** Access to SC4x0's Chip Setup and Control (CSC)
-** and PC Card (PCC) indexed registers:
-*/
-static inline void setcsc(int reg, unsigned char data)
-{
- outb(reg, CSC_INDEX);
- outb(data, CSC_DATA);
-}
-
-static inline unsigned char getcsc(int reg)
-{
- outb(reg, CSC_INDEX);
- return(inb(CSC_DATA));
-}
-
-static inline void setpcc(int reg, unsigned char data)
-{
- outb(reg, PCC_INDEX);
- outb(data, PCC_DATA);
-}
-
-static inline unsigned char getpcc(int reg)
-{
- outb(reg, PCC_INDEX);
- return(inb(PCC_DATA));
-}
-
-
-/*
-************************************************************
-** Enable access to DIL/NetPC's flash by mapping it into
-** the SC4x0's MMS Window C.
-************************************************************
-*/
-static void dnpc_map_flash(unsigned long flash_base, unsigned long flash_size)
-{
- unsigned long flash_end = flash_base + flash_size - 1;
-
- /*
- ** enable setup of MMS windows C-F:
- */
- /* - enable PC Card indexed register space */
- setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);
- /* - set PC Card controller to operate in standard mode */
- setcsc(CSC_PCCMDCR, getcsc(CSC_PCCMDCR) & ~1);
-
- /*
- ** Program base address and end address of window
- ** where the flash ROM should appear in CPU address space
- */
- setpcc(PCC_MWSAR_1_Lo, (flash_base >> 12) & 0xff);
- setpcc(PCC_MWSAR_1_Hi, (flash_base >> 20) & 0x3f);
- setpcc(PCC_MWEAR_1_Lo, (flash_end >> 12) & 0xff);
- setpcc(PCC_MWEAR_1_Hi, (flash_end >> 20) & 0x3f);
-
- /* program offset of first flash location to appear in this window (0) */
- setpcc(PCC_MWAOR_1_Lo, ((0 - flash_base) >> 12) & 0xff);
- setpcc(PCC_MWAOR_1_Hi, ((0 - flash_base)>> 20) & 0x3f);
-
- /* set attributes for MMS window C: non-cacheable, write-enabled */
- setcsc(CSC_MMSWAR, getcsc(CSC_MMSWAR) & ~0x11);
-
- /* select physical device ROMCS0 (i.e. flash) for MMS Window C */
- setcsc(CSC_MMSWDSR, getcsc(CSC_MMSWDSR) & ~0x03);
-
- /* enable memory window 1 */
- setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) | 0x02);
-
- /* now disable PC Card indexed register space again */
- setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
-}
-
-
-/*
-************************************************************
-** Disable access to DIL/NetPC's flash by mapping it into
-** the SC4x0's MMS Window C.
-************************************************************
-*/
-static void dnpc_unmap_flash(void)
-{
- /* - enable PC Card indexed register space */
- setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);
-
- /* disable memory window 1 */
- setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) & ~0x02);
-
- /* now disable PC Card indexed register space again */
- setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
-}
-
-
-
-/*
-************************************************************
-** Enable/Disable VPP to write to flash
-************************************************************
-*/
-
-static DEFINE_SPINLOCK(dnpc_spin);
-static int vpp_counter = 0;
-/*
-** This is what has to be done for the DNP board ..
-*/
-static void dnp_set_vpp(struct map_info *not_used, int on)
-{
- spin_lock_irq(&dnpc_spin);
-
- if (on)
- {
- if(++vpp_counter == 1)
- setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x4);
- }
- else
- {
- if(--vpp_counter == 0)
- setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x4);
- else
- BUG_ON(vpp_counter < 0);
- }
- spin_unlock_irq(&dnpc_spin);
-}
-
-/*
-** .. and this the ADNP version:
-*/
-static void adnp_set_vpp(struct map_info *not_used, int on)
-{
- spin_lock_irq(&dnpc_spin);
-
- if (on)
- {
- if(++vpp_counter == 1)
- setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x8);
- }
- else
- {
- if(--vpp_counter == 0)
- setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x8);
- else
- BUG_ON(vpp_counter < 0);
- }
- spin_unlock_irq(&dnpc_spin);
-}
-
-
-
-#define DNP_WINDOW_SIZE 0x00200000 /* DNP flash size is 2MiB */
-#define ADNP_WINDOW_SIZE 0x00400000 /* ADNP flash size is 4MiB */
-#define WINDOW_ADDR FLASH_BASE
-
-static struct map_info dnpc_map = {
- .name = "ADNP Flash Bank",
- .size = ADNP_WINDOW_SIZE,
- .bankwidth = 1,
- .set_vpp = adnp_set_vpp,
- .phys = WINDOW_ADDR
-};
-
-/*
-** The layout of the flash is somewhat "strange":
-**
-** 1. 960 KiB (15 blocks) : Space for ROM Bootloader and user data
-** 2. 64 KiB (1 block) : System BIOS
-** 3. 960 KiB (15 blocks) : User Data (DNP model) or
-** 3. 3008 KiB (47 blocks) : User Data (ADNP model)
-** 4. 64 KiB (1 block) : System BIOS Entry
-*/
-
-static struct mtd_partition partition_info[]=
-{
- {
- .name = "ADNP boot",
- .offset = 0,
- .size = 0xf0000,
- },
- {
- .name = "ADNP system BIOS",
- .offset = MTDPART_OFS_NXTBLK,
- .size = 0x10000,
-#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
- .mask_flags = MTD_WRITEABLE,
-#endif
- },
- {
- .name = "ADNP file system",
- .offset = MTDPART_OFS_NXTBLK,
- .size = 0x2f0000,
- },
- {
- .name = "ADNP system BIOS entry",
- .offset = MTDPART_OFS_NXTBLK,
- .size = MTDPART_SIZ_FULL,
-#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
- .mask_flags = MTD_WRITEABLE,
-#endif
- },
-};
-
-#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
-
-static struct mtd_info *mymtd;
-static struct mtd_info *lowlvl_parts[NUM_PARTITIONS];
-static struct mtd_info *merged_mtd;
-
-/*
-** "Highlevel" partition info:
-**
-** Using the MTD concat layer, we can re-arrange partitions to our
-** liking: we construct a virtual MTD device by concatenating the
-** partitions, specifying the sequence such that the boot block
-** is immediately followed by the filesystem block (i.e. the stupid
-** system BIOS block is mapped to a different place). When re-partitioning
-** this concatenated MTD device, we can set the boot block size to
-** an arbitrary (though erase block aligned) value i.e. not one that
-** is dictated by the flash's physical layout. We can thus set the
-** boot block to be e.g. 64 KB (which is fully sufficient if we want
-** to boot an etherboot image) or to -say- 1.5 MB if we want to boot
-** a large kernel image. In all cases, the remainder of the flash
-** is available as file system space.
-*/
-
-static struct mtd_partition higlvl_partition_info[]=
-{
- {
- .name = "ADNP boot block",
- .offset = 0,
- .size = CONFIG_MTD_DILNETPC_BOOTSIZE,
- },
- {
- .name = "ADNP file system space",
- .offset = MTDPART_OFS_NXTBLK,
- .size = ADNP_WINDOW_SIZE-CONFIG_MTD_DILNETPC_BOOTSIZE-0x20000,
- },
- {
- .name = "ADNP system BIOS + BIOS Entry",
- .offset = MTDPART_OFS_NXTBLK,
- .size = MTDPART_SIZ_FULL,
-#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
- .mask_flags = MTD_WRITEABLE,
-#endif
- },
-};
-
-#define NUM_HIGHLVL_PARTITIONS ARRAY_SIZE(higlvl_partition_info)
-
-
-static int dnp_adnp_probe(void)
-{
- char *biosid, rc = -1;
-
- biosid = (char*)ioremap(BIOSID_BASE, 16);
- if(biosid)
- {
- if(!strcmp(biosid, ID_DNPC))
- rc = 1; /* this is a DNPC */
- else if(!strcmp(biosid, ID_ADNP))
- rc = 0; /* this is a ADNPC */
- }
- iounmap((void *)biosid);
- return(rc);
-}
-
-
-static int __init init_dnpc(void)
-{
- int is_dnp;
-
- /*
- ** determine hardware (DNP/ADNP/invalid)
- */
- if((is_dnp = dnp_adnp_probe()) < 0)
- return -ENXIO;
-
- /*
- ** Things are set up for ADNP by default
- ** -> modify all that needs to be different for DNP
- */
- if(is_dnp)
- { /*
- ** Adjust window size, select correct set_vpp function.
- ** The partitioning scheme is identical on both DNP
- ** and ADNP except for the size of the third partition.
- */
- int i;
- dnpc_map.size = DNP_WINDOW_SIZE;
- dnpc_map.set_vpp = dnp_set_vpp;
- partition_info[2].size = 0xf0000;
-
- /*
- ** increment all string pointers so the leading 'A' gets skipped,
- ** thus turning all occurrences of "ADNP ..." into "DNP ..."
- */
- ++dnpc_map.name;
- for(i = 0; i < NUM_PARTITIONS; i++)
- ++partition_info[i].name;
- higlvl_partition_info[1].size = DNP_WINDOW_SIZE -
- CONFIG_MTD_DILNETPC_BOOTSIZE - 0x20000;
- for(i = 0; i < NUM_HIGHLVL_PARTITIONS; i++)
- ++higlvl_partition_info[i].name;
- }
-
- printk(KERN_NOTICE "DIL/Net %s flash: 0x%lx at 0x%llx\n",
- is_dnp ? "DNPC" : "ADNP", dnpc_map.size, (unsigned long long)dnpc_map.phys);
-
- dnpc_map.virt = ioremap_nocache(dnpc_map.phys, dnpc_map.size);
-
- dnpc_map_flash(dnpc_map.phys, dnpc_map.size);
-
- if (!dnpc_map.virt) {
- printk("Failed to ioremap_nocache\n");
- return -EIO;
- }
- simple_map_init(&dnpc_map);
-
- printk("FLASH virtual address: 0x%p\n", dnpc_map.virt);
-
- mymtd = do_map_probe("jedec_probe", &dnpc_map);
-
- if (!mymtd)
- mymtd = do_map_probe("cfi_probe", &dnpc_map);
-
- /*
- ** If flash probes fail, try to make flashes accessible
- ** at least as ROM. Ajust erasesize in this case since
- ** the default one (128M) will break our partitioning
- */
- if (!mymtd)
- if((mymtd = do_map_probe("map_rom", &dnpc_map)))
- mymtd->erasesize = 0x10000;
-
- if (!mymtd) {
- iounmap(dnpc_map.virt);
- return -ENXIO;
- }
-
- mymtd->owner = THIS_MODULE;
-
- /*
- ** Supply pointers to lowlvl_parts[] array to add_mtd_partitions()
- ** -> add_mtd_partitions() will _not_ register MTD devices for
- ** the partitions, but will instead store pointers to the MTD
- ** objects it creates into our lowlvl_parts[] array.
- ** NOTE: we arrange the pointers such that the sequence of the
- ** partitions gets re-arranged: partition #2 follows
- ** partition #0.
- */
- partition_info[0].mtdp = &lowlvl_parts[0];
- partition_info[1].mtdp = &lowlvl_parts[2];
- partition_info[2].mtdp = &lowlvl_parts[1];
- partition_info[3].mtdp = &lowlvl_parts[3];
-
- mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
-
- /*
- ** now create a virtual MTD device by concatenating the for partitions
- ** (in the sequence given by the lowlvl_parts[] array.
- */
- merged_mtd = mtd_concat_create(lowlvl_parts, NUM_PARTITIONS, "(A)DNP Flash Concatenated");
- if(merged_mtd)
- { /*
- ** now partition the new device the way we want it. This time,
- ** we do not supply mtd pointers in higlvl_partition_info, so
- ** add_mtd_partitions() will register the devices.
- */
- mtd_device_register(merged_mtd, higlvl_partition_info,
- NUM_HIGHLVL_PARTITIONS);
- }
-
- return 0;
-}
-
-static void __exit cleanup_dnpc(void)
-{
- if(merged_mtd) {
- mtd_device_unregister(merged_mtd);
- mtd_concat_destroy(merged_mtd);
- }
-
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (dnpc_map.virt) {
- iounmap(dnpc_map.virt);
- dnpc_unmap_flash();
- dnpc_map.virt = NULL;
- }
-}
-
-module_init(init_dnpc);
-module_exit(cleanup_dnpc);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH");
-MODULE_DESCRIPTION("MTD map driver for SSV DIL/NetPC DNP & ADNP");
diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c
deleted file mode 100644
index 6538ac6..0000000
--- a/drivers/mtd/maps/dmv182.c
+++ /dev/null
@@ -1,146 +0,0 @@
-
-/*
- * drivers/mtd/maps/dmv182.c
- *
- * Flash map driver for the Dy4 SVME182 board
- *
- * Copyright 2003-2004, TimeSys Corporation
- *
- * Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/errno.h>
-
-/*
- * This driver currently handles only the 16MiB user flash bank 1 on the
- * board. It does not provide access to bank 0 (contains the Dy4 FFW), bank 2
- * (VxWorks boot), or the optional 48MiB expansion flash.
- *
- * scott.wood@timesys.com: On the newer boards with 128MiB flash, it
- * now supports the first 96MiB (the boot flash bank containing FFW
- * is excluded). The VxWorks loader is in partition 1.
- */
-
-#define FLASH_BASE_ADDR 0xf0000000
-#define FLASH_BANK_SIZE (128*1024*1024)
-
-MODULE_AUTHOR("Scott Wood, TimeSys Corporation <scott.wood@timesys.com>");
-MODULE_DESCRIPTION("User-programmable flash device on the Dy4 SVME182 board");
-MODULE_LICENSE("GPL");
-
-static struct map_info svme182_map = {
- .name = "Dy4 SVME182",
- .bankwidth = 32,
- .size = 128 * 1024 * 1024
-};
-
-#define BOOTIMAGE_PART_SIZE ((6*1024*1024)-RESERVED_PART_SIZE)
-
-// Allow 6MiB for the kernel
-#define NEW_BOOTIMAGE_PART_SIZE (6 * 1024 * 1024)
-// Allow 1MiB for the bootloader
-#define NEW_BOOTLOADER_PART_SIZE (1024 * 1024)
-// Use the remaining 9MiB at the end of flash for the RFS
-#define NEW_RFS_PART_SIZE (0x01000000 - NEW_BOOTLOADER_PART_SIZE - \
- NEW_BOOTIMAGE_PART_SIZE)
-
-static struct mtd_partition svme182_partitions[] = {
- // The Lower PABS is only 128KiB, but the partition code doesn't
- // like partitions that don't end on the largest erase block
- // size of the device, even if all of the erase blocks in the
- // partition are small ones. The hardware should prevent
- // writes to the actual PABS areas.
- {
- name: "Lower PABS and CPU 0 bootloader or kernel",
- size: 6*1024*1024,
- offset: 0,
- },
- {
- name: "Root Filesystem",
- size: 10*1024*1024,
- offset: MTDPART_OFS_NXTBLK
- },
- {
- name: "CPU1 Bootloader",
- size: 1024*1024,
- offset: MTDPART_OFS_NXTBLK,
- },
- {
- name: "Extra",
- size: 110*1024*1024,
- offset: MTDPART_OFS_NXTBLK
- },
- {
- name: "Foundation Firmware and Upper PABS",
- size: 1024*1024,
- offset: MTDPART_OFS_NXTBLK,
- mask_flags: MTD_WRITEABLE // read-only
- }
-};
-
-static struct mtd_info *this_mtd;
-
-static int __init init_svme182(void)
-{
- struct mtd_partition *partitions;
- int num_parts = ARRAY_SIZE(svme182_partitions);
-
- partitions = svme182_partitions;
-
- svme182_map.virt = ioremap(FLASH_BASE_ADDR, svme182_map.size);
-
- if (svme182_map.virt == 0) {
- printk("Failed to ioremap FLASH memory area.\n");
- return -EIO;
- }
-
- simple_map_init(&svme182_map);
-
- this_mtd = do_map_probe("cfi_probe", &svme182_map);
- if (!this_mtd)
- {
- iounmap((void *)svme182_map.virt);
- return -ENXIO;
- }
-
- printk(KERN_NOTICE "SVME182 flash device: %dMiB at 0x%08x\n",
- this_mtd->size >> 20, FLASH_BASE_ADDR);
-
- this_mtd->owner = THIS_MODULE;
- mtd_device_register(this_mtd, partitions, num_parts);
-
- return 0;
-}
-
-static void __exit cleanup_svme182(void)
-{
- if (this_mtd)
- {
- mtd_device_unregister(this_mtd);
- map_destroy(this_mtd);
- }
-
- if (svme182_map.virt)
- {
- iounmap((void *)svme182_map.virt);
- svme182_map.virt = 0;
- }
-
- return;
-}
-
-module_init(init_svme182);
-module_exit(cleanup_svme182);
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 7b643de..5ede282 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -157,7 +157,8 @@ static void gf_copy_to(struct map_info *map, unsigned long to,
memcpy_toio(map->virt + (to % state->win_size), from, len);
}
-static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", NULL };
/**
* gpio_flash_probe() - setup a mapping for a GPIO assisted flash
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
deleted file mode 100644
index 8ed6cb4..0000000
--- a/drivers/mtd/maps/h720x-flash.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Flash memory access on Hynix GMS30C7201/HMS30C7202 based
- * evaluation boards
- *
- * (C) 2002 Jungjun Kim <jungjun.kim@hynix.com>
- * 2003 Thomas Gleixner <tglx@linutronix.de>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <mach/hardware.h>
-#include <asm/io.h>
-
-static struct mtd_info *mymtd;
-
-static struct map_info h720x_map = {
- .name = "H720X",
- .bankwidth = 4,
- .size = H720X_FLASH_SIZE,
- .phys = H720X_FLASH_PHYS,
-};
-
-static struct mtd_partition h720x_partitions[] = {
- {
- .name = "ArMon",
- .size = 0x00080000,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "Env",
- .size = 0x00040000,
- .offset = 0x00080000,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "Kernel",
- .size = 0x00180000,
- .offset = 0x000c0000,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "Ramdisk",
- .size = 0x00400000,
- .offset = 0x00240000,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "jffs2",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND
- }
-};
-
-#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions)
-
-/*
- * Initialize FLASH support
- */
-static int __init h720x_mtd_init(void)
-{
- h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size);
-
- if (!h720x_map.virt) {
- printk(KERN_ERR "H720x-MTD: ioremap failed\n");
- return -EIO;
- }
-
- simple_map_init(&h720x_map);
-
- // Probe for flash bankwidth 4
- printk (KERN_INFO "H720x-MTD probing 32bit FLASH\n");
- mymtd = do_map_probe("cfi_probe", &h720x_map);
- if (!mymtd) {
- printk (KERN_INFO "H720x-MTD probing 16bit FLASH\n");
- // Probe for bankwidth 2
- h720x_map.bankwidth = 2;
- mymtd = do_map_probe("cfi_probe", &h720x_map);
- }
-
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
-
- mtd_device_parse_register(mymtd, NULL, NULL,
- h720x_partitions, NUM_PARTITIONS);
- return 0;
- }
-
- iounmap((void *)h720x_map.virt);
- return -ENXIO;
-}
-
-/*
- * Cleanup
- */
-static void __exit h720x_mtd_cleanup(void)
-{
-
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
-
- if (h720x_map.virt) {
- iounmap((void *)h720x_map.virt);
- h720x_map.virt = 0;
- }
-}
-
-
-module_init(h720x_mtd_init);
-module_exit(h720x_mtd_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
-MODULE_DESCRIPTION("MTD map driver for Hynix evaluation boards");
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 834a06c..4968674 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -24,14 +24,12 @@
#define NUM_FLASHBANKS 2
#define BUSWIDTH 4
-/* can be { "cfi_probe", "jedec_probe", "map_rom", NULL } */
-#define PROBETYPES { "jedec_probe", NULL }
-
#define MSG_PREFIX "impA7:" /* prefix for our printk()'s */
#define MTDID "impa7-%d" /* for mtdparts= partitioning */
static struct mtd_info *impa7_mtd[NUM_FLASHBANKS];
+static const char * const rom_probe_types[] = { "jedec_probe", NULL };
static struct map_info impa7_map[NUM_FLASHBANKS] = {
{
@@ -60,8 +58,7 @@ static struct mtd_partition partitions[] =
static int __init init_impa7(void)
{
- static const char *rom_probe_types[] = PROBETYPES;
- const char **type;
+ const char * const *type;
int i;
static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
{ WINDOW_ADDR0, WINDOW_SIZE0 },
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index b14053b..f581ac1 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -82,9 +82,9 @@ static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
{
- static const char *probe_types[] =
+ static const char * const probe_types[] =
{ "cfi_probe", "jedec_probe", NULL };
- const char **type;
+ const char * const *type;
for (type = probe_types; !p->info && *type; type++)
p->info = do_map_probe(*type, &p->map);
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
deleted file mode 100644
index 4a41ced..0000000
--- a/drivers/mtd/maps/ixp2000.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * drivers/mtd/maps/ixp2000.c
- *
- * Mapping for the Intel XScale IXP2000 based systems
- *
- * Copyright (C) 2002 Intel Corp.
- * Copyright (C) 2003-2004 MontaVista Software, Inc.
- *
- * Original Author: Naeem M Afzal <naeem.m.afzal@intel.com>
- * Maintainer: Deepak Saxena <dsaxena@plexity.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/mach/flash.h>
-
-#include <linux/reboot.h>
-
-struct ixp2000_flash_info {
- struct mtd_info *mtd;
- struct map_info map;
- struct resource *res;
-};
-
-static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs)
-{
- unsigned long (*set_bank)(unsigned long) =
- (unsigned long(*)(unsigned long))map->map_priv_2;
-
- return (set_bank ? set_bank(ofs) : ofs);
-}
-
-#ifdef __ARMEB__
-/*
- * Rev A0 and A1 of IXP2400 silicon have a broken addressing unit which
- * causes the lower address bits to be XORed with 0x11 on 8 bit accesses
- * and XORed with 0x10 on 16 bit accesses. See the spec update, erratum 44.
- */
-static int erratum44_workaround = 0;
-
-static inline unsigned long address_fix8_write(unsigned long addr)
-{
- if (erratum44_workaround) {
- return (addr ^ 3);
- }
- return addr;
-}
-#else
-
-#define address_fix8_write(x) (x)
-#endif
-
-static map_word ixp2000_flash_read8(struct map_info *map, unsigned long ofs)
-{
- map_word val;
-
- val.x[0] = *((u8 *)(map->map_priv_1 + flash_bank_setup(map, ofs)));
- return val;
-}
-
-/*
- * We can't use the standard memcpy due to the broken SlowPort
- * address translation on rev A0 and A1 silicon and the fact that
- * we have banked flash.
- */
-static void ixp2000_flash_copy_from(struct map_info *map, void *to,
- unsigned long from, ssize_t len)
-{
- from = flash_bank_setup(map, from);
- while(len--)
- *(__u8 *) to++ = *(__u8 *)(map->map_priv_1 + from++);
-}
-
-static void ixp2000_flash_write8(struct map_info *map, map_word d, unsigned long ofs)
-{
- *(__u8 *) (address_fix8_write(map->map_priv_1 +
- flash_bank_setup(map, ofs))) = d.x[0];
-}
-
-static void ixp2000_flash_copy_to(struct map_info *map, unsigned long to,
- const void *from, ssize_t len)
-{
- to = flash_bank_setup(map, to);
- while(len--) {
- unsigned long tmp = address_fix8_write(map->map_priv_1 + to++);
- *(__u8 *)(tmp) = *(__u8 *)(from++);
- }
-}
-
-
-static int ixp2000_flash_remove(struct platform_device *dev)
-{
- struct flash_platform_data *plat = dev->dev.platform_data;
- struct ixp2000_flash_info *info = platform_get_drvdata(dev);
-
- platform_set_drvdata(dev, NULL);
-
- if(!info)
- return 0;
-
- if (info->mtd) {
- mtd_device_unregister(info->mtd);
- map_destroy(info->mtd);
- }
- if (info->map.map_priv_1)
- iounmap((void *) info->map.map_priv_1);
-
- if (info->res) {
- release_resource(info->res);
- kfree(info->res);
- }
-
- if (plat->exit)
- plat->exit();
-
- return 0;
-}
-
-
-static int ixp2000_flash_probe(struct platform_device *dev)
-{
- static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
- struct ixp2000_flash_data *ixp_data = dev->dev.platform_data;
- struct flash_platform_data *plat;
- struct ixp2000_flash_info *info;
- unsigned long window_size;
- int err = -1;
-
- if (!ixp_data)
- return -ENODEV;
-
- plat = ixp_data->platform_data;
- if (!plat)
- return -ENODEV;
-
- window_size = resource_size(dev->resource);
- dev_info(&dev->dev, "Probe of IXP2000 flash(%d banks x %dMiB)\n",
- ixp_data->nr_banks, ((u32)window_size >> 20));
-
- if (plat->width != 1) {
- dev_err(&dev->dev, "IXP2000 MTD map only supports 8-bit mode, asking for %d\n",
- plat->width * 8);
- return -EIO;
- }
-
- info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
- if(!info) {
- err = -ENOMEM;
- goto Error;
- }
-
- platform_set_drvdata(dev, info);
-
- /*
- * Tell the MTD layer we're not 1:1 mapped so that it does
- * not attempt to do a direct access on us.
- */
- info->map.phys = NO_XIP;
-
- info->map.size = ixp_data->nr_banks * window_size;
- info->map.bankwidth = 1;
-
- /*
- * map_priv_2 is used to store a ptr to the bank_setup routine
- */
- info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup;
-
- info->map.name = dev_name(&dev->dev);
- info->map.read = ixp2000_flash_read8;
- info->map.write = ixp2000_flash_write8;
- info->map.copy_from = ixp2000_flash_copy_from;
- info->map.copy_to = ixp2000_flash_copy_to;
-
- info->res = request_mem_region(dev->resource->start,
- resource_size(dev->resource),
- dev_name(&dev->dev));
- if (!info->res) {
- dev_err(&dev->dev, "Could not reserve memory region\n");
- err = -ENOMEM;
- goto Error;
- }
-
- info->map.map_priv_1 =
- (unsigned long)ioremap(dev->resource->start,
- resource_size(dev->resource));
- if (!info->map.map_priv_1) {
- dev_err(&dev->dev, "Failed to ioremap flash region\n");
- err = -EIO;
- goto Error;
- }
-
-#if defined(__ARMEB__)
- /*
- * Enable erratum 44 workaround for NPUs with broken slowport
- */
-
- erratum44_workaround = ixp2000_has_broken_slowport();
- dev_info(&dev->dev, "Erratum 44 workaround %s\n",
- erratum44_workaround ? "enabled" : "disabled");
-#endif
-
- info->mtd = do_map_probe(plat->map_name, &info->map);
- if (!info->mtd) {
- dev_err(&dev->dev, "map_probe failed\n");
- err = -ENXIO;
- goto Error;
- }
- info->mtd->owner = THIS_MODULE;
-
- err = mtd_device_parse_register(info->mtd, probes, NULL, NULL, 0);
- if (err)
- goto Error;
-
- return 0;
-
-Error:
- ixp2000_flash_remove(dev);
- return err;
-}
-
-static struct platform_driver ixp2000_flash_driver = {
- .probe = ixp2000_flash_probe,
- .remove = ixp2000_flash_remove,
- .driver = {
- .name = "IXP2000-Flash",
- .owner = THIS_MODULE,
- },
-};
-
-module_platform_driver(ixp2000_flash_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_ALIAS("platform:IXP2000-Flash");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index e864fc6..52b3410 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -148,7 +148,7 @@ struct ixp4xx_flash_info {
struct resource *res;
};
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int ixp4xx_flash_remove(struct platform_device *dev)
{
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index d1da6ed..d7ac65d 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -46,8 +46,7 @@ struct ltq_mtd {
};
static const char ltq_map_name[] = "ltq_nor";
-static const char *ltq_probe_types[] = {
- "cmdlinepart", "ofpart", NULL };
+static const char * const ltq_probe_types[] = { "cmdlinepart", "ofpart", NULL };
static map_word
ltq_read16(struct map_info *map, unsigned long adr)
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
deleted file mode 100644
index 93fa56c..0000000
--- a/drivers/mtd/maps/mbx860.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Handle mapping of the flash on MBX860 boards
- *
- * Author: Anton Todorov
- * Copyright: (C) 2001 Emness Technology
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-
-#define WINDOW_ADDR 0xfe000000
-#define WINDOW_SIZE 0x00200000
-
-/* Flash / Partition sizing */
-#define MAX_SIZE_KiB 8192
-#define BOOT_PARTITION_SIZE_KiB 512
-#define KERNEL_PARTITION_SIZE_KiB 5632
-#define APP_PARTITION_SIZE_KiB 2048
-
-#define NUM_PARTITIONS 3
-
-/* partition_info gives details on the logical partitions that the split the
- * single flash device into. If the size if zero we use up to the end of the
- * device. */
-static struct mtd_partition partition_info[]={
- { .name = "MBX flash BOOT partition",
- .offset = 0,
- .size = BOOT_PARTITION_SIZE_KiB*1024 },
- { .name = "MBX flash DATA partition",
- .offset = BOOT_PARTITION_SIZE_KiB*1024,
- .size = (KERNEL_PARTITION_SIZE_KiB)*1024 },
- { .name = "MBX flash APPLICATION partition",
- .offset = (BOOT_PARTITION_SIZE_KiB+KERNEL_PARTITION_SIZE_KiB)*1024 }
-};
-
-
-static struct mtd_info *mymtd;
-
-struct map_info mbx_map = {
- .name = "MBX flash",
- .size = WINDOW_SIZE,
- .phys = WINDOW_ADDR,
- .bankwidth = 4,
-};
-
-static int __init init_mbx(void)
-{
- printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR);
- mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
-
- if (!mbx_map.virt) {
- printk("Failed to ioremap\n");
- return -EIO;
- }
- simple_map_init(&mbx_map);
-
- mymtd = do_map_probe("jedec_probe", &mbx_map);
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
- mtd_device_register(mymtd, NULL, 0);
- mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
- return 0;
- }
-
- iounmap((void *)mbx_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_mbx(void)
-{
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (mbx_map.virt) {
- iounmap((void *)mbx_map.virt);
- mbx_map.virt = 0;
- }
-}
-
-module_init(init_mbx);
-module_exit(cleanup_mbx);
-
-MODULE_AUTHOR("Anton Todorov <a.todorov@emness.com>");
-MODULE_DESCRIPTION("MTD map driver for Motorola MBX860 board");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index c3aebd5..c2604f8 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -283,8 +283,7 @@ static int mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err)
goto release;
- /* tsk - do_map_probe should take const char * */
- mtd = do_map_probe((char *)info->map_name, &map->map);
+ mtd = do_map_probe(info->map_name, &map->map);
err = -ENODEV;
if (!mtd)
goto release;
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 21b0b71..e7a592c 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -87,21 +87,18 @@ static void physmap_set_vpp(struct map_info *map, int state)
spin_unlock_irqrestore(&info->vpp_lock, flags);
}
-static const char *rom_probe_types[] = {
- "cfi_probe",
- "jedec_probe",
- "qinfo_probe",
- "map_rom",
- NULL };
-static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs",
- NULL };
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "qinfo_probe", "map_rom", NULL };
+
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", "afs", NULL };
static int physmap_flash_probe(struct platform_device *dev)
{
struct physmap_flash_data *physmap_data;
struct physmap_flash_info *info;
- const char **probe_type;
- const char **part_types;
+ const char * const *probe_type;
+ const char * const *part_types;
int err = 0;
int i;
int devices_found = 0;
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 363939d..d111097 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -71,6 +71,9 @@ static int of_flash_remove(struct platform_device *dev)
return 0;
}
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "map_rom" };
+
/* Helper function to handle probing of the obsolete "direct-mapped"
* compatible binding, which has an extra "probe-type" property
* describing the type of flash probe necessary. */
@@ -80,8 +83,6 @@ static struct mtd_info *obsolete_probe(struct platform_device *dev,
struct device_node *dp = dev->dev.of_node;
const char *of_probe;
struct mtd_info *mtd;
- static const char *rom_probe_types[]
- = { "cfi_probe", "jedec_probe", "map_rom"};
int i;
dev_warn(&dev->dev, "Device tree uses obsolete \"direct-mapped\" "
@@ -111,9 +112,10 @@ static struct mtd_info *obsolete_probe(struct platform_device *dev,
specifies the list of partition probers to use. If none is given then the
default is use. These take precedence over other device tree
information. */
-static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot",
- "ofpart", "ofoldpart", NULL };
-static const char **of_get_probes(struct device_node *dp)
+static const char * const part_probe_types_def[] = {
+ "cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL };
+
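/*
 * Editor's sketch (not part of the patch): of_get_probes() below reads the
 * partition-prober list from the device tree and only falls back to
 * part_probe_types_def when none is given.  Assuming the usual
 * "linux,part-probe" string-list property, a node could look like:
 *
 *	flash@0 {
 *		compatible = "cfi-flash";
 *		reg = <0 0x01000000>;
 *		linux,part-probe = "cmdlinepart", "ofpart";
 *	};
 */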
+static const char * const *of_get_probes(struct device_node *dp)
{
const char *cp;
int cplen;
@@ -142,7 +144,7 @@ static const char **of_get_probes(struct device_node *dp)
return res;
}
-static void of_free_probes(const char **probes)
+static void of_free_probes(const char * const *probes)
{
if (probes != part_probe_types_def)
kfree(probes);
@@ -151,7 +153,7 @@ static void of_free_probes(const char **probes)
static struct of_device_id of_flash_match[];
static int of_flash_probe(struct platform_device *dev)
{
- const char **part_probe_types;
+ const char * const *part_probe_types;
const struct of_device_id *match;
struct device_node *dp = dev->dev.of_node;
struct resource res;
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 2de66b0..71fdda2 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -199,7 +199,7 @@ static int platram_probe(struct platform_device *pdev)
* supplied by the platform_data struct */
if (pdata->map_probes) {
- const char **map_probes = pdata->map_probes;
+ const char * const *map_probes = pdata->map_probes;
for ( ; !info->mtd && *map_probes; map_probes++)
info->mtd = do_map_probe(*map_probes , &info->map);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 43e3dbb..acb1dbc 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -45,9 +45,7 @@ struct pxa2xx_flash_info {
struct map_info map;
};
-
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int pxa2xx_flash_probe(struct platform_device *pdev)
{
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 49c3fe7..ac02fbf 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -45,14 +45,15 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
return 0;
}
-static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", NULL };
static int rbtx4939_flash_probe(struct platform_device *dev)
{
struct rbtx4939_flash_data *pdata;
struct rbtx4939_flash_info *info;
struct resource *res;
- const char **probe_type;
+ const char * const *probe_type;
int err = 0;
unsigned long size;
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
deleted file mode 100644
index ed88225..0000000
--- a/drivers/mtd/maps/rpxlite.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Handle mapping of the flash on the RPX Lite and CLLF boards
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-
-
-#define WINDOW_ADDR 0xfe000000
-#define WINDOW_SIZE 0x800000
-
-static struct mtd_info *mymtd;
-
-static struct map_info rpxlite_map = {
- .name = "RPX",
- .size = WINDOW_SIZE,
- .bankwidth = 4,
- .phys = WINDOW_ADDR,
-};
-
-static int __init init_rpxlite(void)
-{
- printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
- rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
-
- if (!rpxlite_map.virt) {
- printk("Failed to ioremap\n");
- return -EIO;
- }
- simple_map_init(&rpxlite_map);
- mymtd = do_map_probe("cfi_probe", &rpxlite_map);
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
- mtd_device_register(mymtd, NULL, 0);
- return 0;
- }
-
- iounmap((void *)rpxlite_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_rpxlite(void)
-{
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (rpxlite_map.virt) {
- iounmap((void *)rpxlite_map.virt);
- rpxlite_map.virt = 0;
- }
-}
-
-module_init(init_rpxlite);
-module_exit(cleanup_rpxlite);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arnold Christensen <AKC@pel.dk>");
-MODULE_DESCRIPTION("MTD map driver for RPX Lite and CLLF boards");
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index f694417..29e3dca 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -244,7 +244,7 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
return ERR_PTR(ret);
}
-static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+static const char * const part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static int sa1100_mtd_probe(struct platform_device *pdev)
{
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 9d900ad..83a7a70 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -31,7 +31,7 @@ struct map_info soleng_flash_map = {
.bankwidth = 4,
};
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
#ifdef CONFIG_MTD_SUPERH_RESERVE
static struct mtd_partition superh_se_partitions[] = {
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
deleted file mode 100644
index d785879..0000000
--- a/drivers/mtd/maps/tqm8xxl.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Handle mapping of the flash memory access routines
- * on TQM8xxL based devices.
- *
- * based on rpxlite.c
- *
- * Copyright(C) 2001 Kirk Lee <kirk@hpc.ee.ntu.edu.tw>
- *
- * This code is GPLed
- *
- */
-
-/*
- * According to TQM8xxL hardware manual, TQM8xxL series have
- * following flash memory organisations:
- * | capacity | | chip type | | bank0 | | bank1 |
- * 2MiB 512Kx16 2MiB 0
- * 4MiB 1Mx16 4MiB 0
- * 8MiB 1Mx16 4MiB 4MiB
- * Thus, we choose CONFIG_MTD_CFI_I2 & CONFIG_MTD_CFI_B4 at
- * kernel configuration.
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-
-#define FLASH_ADDR 0x40000000
-#define FLASH_SIZE 0x00800000
-#define FLASH_BANK_MAX 4
-
-// trivial struct to describe partition information
-struct mtd_part_def
-{
- int nums;
- unsigned char *type;
- struct mtd_partition* mtd_part;
-};
-
-//static struct mtd_info *mymtd;
-static struct mtd_info* mtd_banks[FLASH_BANK_MAX];
-static struct map_info* map_banks[FLASH_BANK_MAX];
-static struct mtd_part_def part_banks[FLASH_BANK_MAX];
-static unsigned long num_banks;
-static void __iomem *start_scan_addr;
-
-/*
- * Here are partition information for all known TQM8xxL series devices.
- * See include/linux/mtd/partitions.h for definition of the mtd_partition
- * structure.
- *
- * The *_max_flash_size is the maximum possible mapped flash size which
- * is not necessarily the actual flash size. It must correspond to the
- * value specified in the mapping definition defined by the
- * "struct map_desc *_io_desc" for the corresponding machine.
- */
-
-/* Currently, TQM8xxL has up to 8MiB flash */
-static unsigned long tqm8xxl_max_flash_size = 0x00800000;
-
-/* partition definition for first flash bank
- * (cf. "drivers/char/flash_config.c")
- */
-static struct mtd_partition tqm8xxl_partitions[] = {
- {
- .name = "ppcboot",
- .offset = 0x00000000,
- .size = 0x00020000, /* 128KB */
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "kernel", /* default kernel image */
- .offset = 0x00020000,
- .size = 0x000e0000,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "user",
- .offset = 0x00100000,
- .size = 0x00100000,
- },
- {
- .name = "initrd",
- .offset = 0x00200000,
- .size = 0x00200000,
- }
-};
-/* partition definition for second flash bank */
-static struct mtd_partition tqm8xxl_fs_partitions[] = {
- {
- .name = "cramfs",
- .offset = 0x00000000,
- .size = 0x00200000,
- },
- {
- .name = "jffs",
- .offset = 0x00200000,
- .size = 0x00200000,
- //.size = MTDPART_SIZ_FULL,
- }
-};
-
-static int __init init_tqm_mtd(void)
-{
- int idx = 0, ret = 0;
- unsigned long flash_addr, flash_size, mtd_size = 0;
- /* pointer to TQM8xxL board info data */
- bd_t *bd = (bd_t *)__res;
-
- flash_addr = bd->bi_flashstart;
- flash_size = bd->bi_flashsize;
-
- //request maximum flash size address space
- start_scan_addr = ioremap(flash_addr, flash_size);
- if (!start_scan_addr) {
- printk(KERN_WARNING "%s:Failed to ioremap address:0x%x\n", __func__, flash_addr);
- return -EIO;
- }
-
- for (idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
- if(mtd_size >= flash_size)
- break;
-
- printk(KERN_INFO "%s: chip probing count %d\n", __func__, idx);
-
- map_banks[idx] = kzalloc(sizeof(struct map_info), GFP_KERNEL);
- if(map_banks[idx] == NULL) {
- ret = -ENOMEM;
- /* FIXME: What if some MTD devices were probed already? */
- goto error_mem;
- }
-
- map_banks[idx]->name = kmalloc(16, GFP_KERNEL);
-
- if (!map_banks[idx]->name) {
- ret = -ENOMEM;
- /* FIXME: What if some MTD devices were probed already? */
- goto error_mem;
- }
- sprintf(map_banks[idx]->name, "TQM8xxL%d", idx);
-
- map_banks[idx]->size = flash_size;
- map_banks[idx]->bankwidth = 4;
-
- simple_map_init(map_banks[idx]);
-
- map_banks[idx]->virt = start_scan_addr;
- map_banks[idx]->phys = flash_addr;
- /* FIXME: This looks utterly bogus, but I'm trying to
- preserve the behaviour of the original (shown here)...
-
- map_banks[idx]->map_priv_1 =
- start_scan_addr + ((idx > 0) ?
- (mtd_banks[idx-1] ? mtd_banks[idx-1]->size : 0) : 0);
- */
-
- if (idx && mtd_banks[idx-1]) {
- map_banks[idx]->virt += mtd_banks[idx-1]->size;
- map_banks[idx]->phys += mtd_banks[idx-1]->size;
- }
-
- //start to probe flash chips
- mtd_banks[idx] = do_map_probe("cfi_probe", map_banks[idx]);
-
- if (mtd_banks[idx]) {
- mtd_banks[idx]->owner = THIS_MODULE;
- mtd_size += mtd_banks[idx]->size;
- num_banks++;
-
- printk(KERN_INFO "%s: bank%d, name:%s, size:%dbytes \n", __func__, num_banks,
- mtd_banks[idx]->name, mtd_banks[idx]->size);
- }
- }
-
- /* no supported flash chips found */
- if (!num_banks) {
- printk(KERN_NOTICE "TQM8xxL: No support flash chips found!\n");
- ret = -ENXIO;
- goto error_mem;
- }
-
- /*
- * Select Static partition definitions
- */
- part_banks[0].mtd_part = tqm8xxl_partitions;
- part_banks[0].type = "Static image";
- part_banks[0].nums = ARRAY_SIZE(tqm8xxl_partitions);
-
- part_banks[1].mtd_part = tqm8xxl_fs_partitions;
- part_banks[1].type = "Static file system";
- part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions);
-
- for(idx = 0; idx < num_banks ; idx++) {
- if (part_banks[idx].nums == 0)
- printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx);
- else
- printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n",
- idx, part_banks[idx].type);
- mtd_device_register(mtd_banks[idx], part_banks[idx].mtd_part,
- part_banks[idx].nums);
- }
- return 0;
-error_mem:
- for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
- if(map_banks[idx] != NULL) {
- kfree(map_banks[idx]->name);
- map_banks[idx]->name = NULL;
- kfree(map_banks[idx]);
- map_banks[idx] = NULL;
- }
- }
-error:
- iounmap(start_scan_addr);
- return ret;
-}
-
-static void __exit cleanup_tqm_mtd(void)
-{
- unsigned int idx = 0;
- for(idx = 0 ; idx < num_banks ; idx++) {
- /* destroy mtd_info previously allocated */
- if (mtd_banks[idx]) {
- mtd_device_unregister(mtd_banks[idx]);
- map_destroy(mtd_banks[idx]);
- }
- /* release map_info not used anymore */
- kfree(map_banks[idx]->name);
- kfree(map_banks[idx]);
- }
-
- if (start_scan_addr) {
- iounmap(start_scan_addr);
- start_scan_addr = 0;
- }
-}
-
-module_init(init_tqm_mtd);
-module_exit(cleanup_tqm_mtd);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kirk Lee <kirk@hpc.ee.ntu.edu.tw>");
-MODULE_DESCRIPTION("MTD map driver for TQM8xxL boards");
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
index 1de390e..da2cdb5 100644
--- a/drivers/mtd/maps/tsunami_flash.c
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -82,11 +82,12 @@ static void __exit cleanup_tsunami_flash(void)
tsunami_flash_mtd = 0;
}
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "map_rom", NULL };
static int __init init_tsunami_flash(void)
{
- static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
- char **type;
+ const char * const *type;
tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index dc571eb..c719879 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -38,6 +38,8 @@
#include <asm/uaccess.h>
+#include "mtdcore.h"
+
static DEFINE_MUTEX(mtd_mutex);
/*
@@ -365,37 +367,35 @@ static void mtdchar_erase_callback (struct erase_info *instr)
wake_up((wait_queue_head_t *)instr->priv);
}
-#ifdef CONFIG_HAVE_MTD_OTP
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
struct mtd_info *mtd = mfi->mtd;
size_t retlen;
- int ret = 0;
-
- /*
- * Make a fake call to mtd_read_fact_prot_reg() to check if OTP
- * operations are supported.
- */
- if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) == -EOPNOTSUPP)
- return -EOPNOTSUPP;
switch (mode) {
case MTD_OTP_FACTORY:
+ if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
+ -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
break;
case MTD_OTP_USER:
+ if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
+ -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
mfi->mode = MTD_FILE_MODE_OTP_USER;
break;
- default:
- ret = -EINVAL;
case MTD_OTP_OFF:
+ mfi->mode = MTD_FILE_MODE_NORMAL;
break;
+ default:
+ return -EINVAL;
}
- return ret;
+
+ return 0;
}
-#else
-# define otp_select_filemode(f,m) -EOPNOTSUPP
-#endif
static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
@@ -888,7 +888,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
break;
}
-#ifdef CONFIG_HAVE_MTD_OTP
case OTPSELECT:
{
int mode;
@@ -944,7 +943,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
break;
}
-#endif
/* This ioctl is being deprecated - it truncates the ECC layout */
case ECCGETLAYOUT:
@@ -1185,23 +1183,25 @@ static struct file_system_type mtd_inodefs_type = {
};
MODULE_ALIAS_FS("mtd_inodefs");
-static int __init init_mtdchar(void)
+int __init init_mtdchar(void)
{
int ret;
ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
"mtd", &mtd_fops);
if (ret < 0) {
- pr_notice("Can't allocate major number %d for "
- "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
+ pr_err("Can't allocate major number %d for MTD\n",
+ MTD_CHAR_MAJOR);
return ret;
}
ret = register_filesystem(&mtd_inodefs_type);
if (ret) {
- pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
+ pr_err("Can't register mtd_inodefs filesystem, error %d\n",
+ ret);
goto err_unregister_chdev;
}
+
return ret;
err_unregister_chdev:
@@ -1209,18 +1209,10 @@ err_unregister_chdev:
return ret;
}
-static void __exit cleanup_mtdchar(void)
+void __exit cleanup_mtdchar(void)
{
unregister_filesystem(&mtd_inodefs_type);
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
-module_init(init_mtdchar);
-module_exit(cleanup_mtdchar);
-
-MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("Direct character-device access to MTD devices");
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 322ca65..c400c57 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -42,6 +42,7 @@
#include <linux/mtd/partitions.h>
#include "mtdcore.h"
+
/*
* backing device capabilities for non-mappable devices (such as NAND flash)
* - permits private mappings, copies are taken of the data
@@ -97,11 +98,7 @@ EXPORT_SYMBOL_GPL(__mtd_next_device);
static LIST_HEAD(mtd_notifiers);
-#if defined(CONFIG_MTD_CHAR) || defined(CONFIG_MTD_CHAR_MODULE)
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
-#else
-#define MTD_DEVT(index) 0
-#endif
/* REVISIT once MTD uses the driver model better, whoever allocates
* the mtd_info will probably want to use the release() hook...
@@ -493,7 +490,7 @@ out_error:
*
* Returns zero in case of success and a negative error code in case of failure.
*/
-int mtd_device_parse_register(struct mtd_info *mtd, const char **types,
+int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
struct mtd_part_parser_data *parser_data,
const struct mtd_partition *parts,
int nr_parts)
@@ -1117,8 +1114,6 @@ EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
/*====================================================================*/
/* Support for /proc/mtd */
-static struct proc_dir_entry *proc_mtd;
-
static int mtd_proc_show(struct seq_file *m, void *v)
{
struct mtd_info *mtd;
@@ -1164,6 +1159,8 @@ static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
return ret;
}
+static struct proc_dir_entry *proc_mtd;
+
static int __init init_mtd(void)
{
int ret;
@@ -1184,11 +1181,17 @@ static int __init init_mtd(void)
if (ret)
goto err_bdi3;
-#ifdef CONFIG_PROC_FS
proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
-#endif /* CONFIG_PROC_FS */
+
+ ret = init_mtdchar();
+ if (ret)
+ goto out_procfs;
+
return 0;
+out_procfs:
+ if (proc_mtd)
+ remove_proc_entry("mtd", NULL);
err_bdi3:
bdi_destroy(&mtd_bdi_ro_mappable);
err_bdi2:
@@ -1202,10 +1205,9 @@ err_reg:
static void __exit cleanup_mtd(void)
{
-#ifdef CONFIG_PROC_FS
+ cleanup_mtdchar();
if (proc_mtd)
- remove_proc_entry( "mtd", NULL);
-#endif /* CONFIG_PROC_FS */
+ remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class);
bdi_destroy(&mtd_bdi_unmappable);
bdi_destroy(&mtd_bdi_ro_mappable);
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 961a384..7b03533 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -1,23 +1,21 @@
-/* linux/drivers/mtd/mtdcore.h
- *
- * Header file for driver private mtdcore exports
- *
+/*
+ * These are exported solely for the purpose of mtd_blkdevs.c and mtdchar.c.
+ * You should not use them for _anything_ else.
*/
-/* These are exported solely for the purpose of mtd_blkdevs.c. You
- should not use them for _anything_ else */
-
extern struct mutex mtd_table_mutex;
-extern struct mtd_info *__mtd_next_device(int i);
-extern int add_mtd_device(struct mtd_info *mtd);
-extern int del_mtd_device(struct mtd_info *mtd);
-extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *,
- int);
-extern int del_mtd_partitions(struct mtd_info *);
-extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
- struct mtd_partition **pparts,
- struct mtd_part_parser_data *data);
+struct mtd_info *__mtd_next_device(int i);
+int add_mtd_device(struct mtd_info *mtd);
+int del_mtd_device(struct mtd_info *mtd);
+int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
+int del_mtd_partitions(struct mtd_info *);
+int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data);
+
+int __init init_mtdchar(void);
+void __exit cleanup_mtdchar(void);
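/*
 * Editor's note (not part of the patch): with these declarations the MTD
 * character-device layer is brought up from mtdcore instead of having its
 * own module_init().  Per the mtdcore.c hunks above, the call order is
 * roughly:
 *
 *	init_mtd()
 *	    -> class_register(), bdi setup
 *	    -> proc_create("mtd", ...)
 *	    -> init_mtdchar()        (chrdev + mtd_inodefs registration)
 *	cleanup_mtd()
 *	    -> cleanup_mtdchar()
 *	    -> remove_proc_entry("mtd", NULL), class_unregister(), bdi_destroy()
 */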
#define mtd_for_each_device(mtd) \
for ((mtd) = __mtd_next_device(0); \
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 70fa70a..3014933 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -694,7 +694,7 @@ EXPORT_SYMBOL_GPL(deregister_mtd_parser);
* Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
* are changing this array!
*/
-static const char *default_mtd_part_types[] = {
+static const char * const default_mtd_part_types[] = {
"cmdlinepart",
"ofpart",
NULL
@@ -720,7 +720,7 @@ static const char *default_mtd_part_types[] = {
* o a positive number of found partitions, in which case on exit @pparts will
* point to an array containing this number of &struct mtd_info objects.
*/
-int parse_mtd_partitions(struct mtd_info *master, const char **types,
+int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 81bf5e5..a60f6c1 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -41,14 +41,6 @@ config MTD_SM_COMMON
tristate
default n
-config MTD_NAND_MUSEUM_IDS
- bool "Enable chip ids for obsolete ancient NAND devices"
- default n
- help
- Enable this option only when your board has first generation
- NAND chips (page size 256 byte, erase size 4-8KiB). The IDs
- of these chips were reused by later, larger chips.
-
config MTD_NAND_DENALI
tristate "Support Denali NAND controller"
help
@@ -81,15 +73,9 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
scratch register here to enable this feature. On Intel Moorestown
boards, the scratch register is at 0xFF108018.
-config MTD_NAND_H1900
- tristate "iPAQ H1900 flash"
- depends on ARCH_PXA && BROKEN
- help
- This enables the driver for the iPAQ h1900 flash.
-
config MTD_NAND_GPIO
tristate "GPIO NAND Flash driver"
- depends on GENERIC_GPIO && ARM
+ depends on GPIOLIB && ARM
help
This enables a GPIO based NAND flash driver.
@@ -201,22 +187,6 @@ config MTD_NAND_BF5XX_BOOTROM_ECC
If unsure, say N.
-config MTD_NAND_RTC_FROM4
- tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)"
- depends on SH_SOLUTION_ENGINE
- select REED_SOLOMON
- select REED_SOLOMON_DEC8
- select BITREVERSE
- help
- This enables the driver for the Renesas Technology AG-AND
- flash interface board (FROM_BOARD4)
-
-config MTD_NAND_PPCHAMELEONEVB
- tristate "NAND Flash device on PPChameleonEVB board"
- depends on PPCHAMELEONEVB && BROKEN
- help
- This enables the NAND flash driver on the PPChameleon EVB Board.
-
config MTD_NAND_S3C2410
tristate "NAND Flash support for Samsung S3C SoCs"
depends on ARCH_S3C24XX || ARCH_S3C64XX
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d76d912..bb81891 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -15,14 +15,11 @@ obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o
obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
-obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o
obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
-obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
-obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index ffcbcca..2d23d29 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -1737,20 +1737,7 @@ static struct platform_driver atmel_nand_driver = {
},
};
-static int __init atmel_nand_init(void)
-{
- return platform_driver_probe(&atmel_nand_driver, atmel_nand_probe);
-}
-
-
-static void __exit atmel_nand_exit(void)
-{
- platform_driver_unregister(&atmel_nand_driver);
-}
-
-
-module_init(atmel_nand_init);
-module_exit(atmel_nand_exit);
+module_platform_driver_probe(atmel_nand_driver, atmel_nand_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rick Bronson");
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 4271e94..776df36 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -874,21 +874,7 @@ static struct platform_driver bf5xx_nand_driver = {
},
};
-static int __init bf5xx_nand_init(void)
-{
- printk(KERN_INFO "%s, Version %s (c) 2007 Analog Devices, Inc.\n",
- DRV_DESC, DRV_VERSION);
-
- return platform_driver_register(&bf5xx_nand_driver);
-}
-
-static void __exit bf5xx_nand_exit(void)
-{
- platform_driver_unregister(&bf5xx_nand_driver);
-}
-
-module_init(bf5xx_nand_init);
-module_exit(bf5xx_nand_exit);
+module_platform_driver(bf5xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRV_AUTHOR);
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 010d612..c34985a 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -303,13 +303,7 @@ static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
case NAND_CMD_SEQIN:
case NAND_CMD_RNDIN:
case NAND_CMD_STATUS:
- case NAND_CMD_DEPLETE1:
case NAND_CMD_RNDOUT:
- case NAND_CMD_STATUS_ERROR:
- case NAND_CMD_STATUS_ERROR0:
- case NAND_CMD_STATUS_ERROR1:
- case NAND_CMD_STATUS_ERROR2:
- case NAND_CMD_STATUS_ERROR3:
cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
return;
}
@@ -536,8 +530,8 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
}
static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page,
- int cached, int raw)
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw)
{
int status;
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 94e17af..c3e15a5 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -34,6 +34,7 @@
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
@@ -577,7 +578,6 @@ static struct davinci_nand_pdata
return pdev->dev.platform_data;
}
#else
-#define davinci_nand_of_match NULL
static struct davinci_nand_pdata
*nand_davinci_get_pdata(struct platform_device *pdev)
{
@@ -878,22 +878,12 @@ static struct platform_driver nand_davinci_driver = {
.driver = {
.name = "davinci_nand",
.owner = THIS_MODULE,
- .of_match_table = davinci_nand_of_match,
+ .of_match_table = of_match_ptr(davinci_nand_of_match),
},
};
MODULE_ALIAS("platform:davinci_nand");
-static int __init nand_davinci_init(void)
-{
- return platform_driver_probe(&nand_davinci_driver, nand_davinci_probe);
-}
-module_init(nand_davinci_init);
-
-static void __exit nand_davinci_exit(void)
-{
- platform_driver_unregister(&nand_davinci_driver);
-}
-module_exit(nand_davinci_exit);
+module_platform_driver_probe(nand_davinci_driver, nand_davinci_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 546f8cb..9253024 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -42,7 +42,7 @@ static void __iomem *request_and_map(struct device *dev,
}
ptr = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (!res)
+ if (!ptr)
dev_err(dev, "ioremap_nocache of %s failed!", res->name);
return ptr;
@@ -90,7 +90,7 @@ static int denali_dt_probe(struct platform_device *ofdev)
denali->irq = platform_get_irq(ofdev, 0);
if (denali->irq < 0) {
dev_err(&ofdev->dev, "no irq defined\n");
- return -ENXIO;
+ return denali->irq;
}
denali->flash_reg = request_and_map(&ofdev->dev, denali_reg);
@@ -146,21 +146,11 @@ static struct platform_driver denali_dt_driver = {
.driver = {
.name = "denali-nand-dt",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(denali_nand_dt_ids),
+ .of_match_table = denali_nand_dt_ids,
},
};
-static int __init denali_init_dt(void)
-{
- return platform_driver_register(&denali_dt_driver);
-}
-module_init(denali_init_dt);
-
-static void __exit denali_exit_dt(void)
-{
- platform_driver_unregister(&denali_dt_driver);
-}
-module_exit(denali_exit_dt);
+module_platform_driver(denali_dt_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 18fa448..fa25e7a 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -1397,18 +1397,7 @@ static struct platform_driver docg4_driver = {
.remove = __exit_p(cleanup_docg4),
};
-static int __init docg4_init(void)
-{
- return platform_driver_probe(&docg4_driver, probe_docg4);
-}
-
-static void __exit docg4_exit(void)
-{
- platform_driver_unregister(&docg4_driver);
-}
-
-module_init(docg4_init);
-module_exit(docg4_exit);
+module_platform_driver_probe(docg4_driver, probe_docg4);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mike Dunn");
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 05ba3f0..911e243 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -1235,18 +1235,7 @@ static struct platform_driver fsmc_nand_driver = {
},
};
-static int __init fsmc_nand_init(void)
-{
- return platform_driver_probe(&fsmc_nand_driver,
- fsmc_nand_probe);
-}
-module_init(fsmc_nand_init);
-
-static void __exit fsmc_nand_exit(void)
-{
- platform_driver_unregister(&fsmc_nand_driver);
-}
-module_exit(fsmc_nand_exit);
+module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index e789e3f..89065dd 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -190,7 +190,6 @@ static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
return r;
}
#else /* CONFIG_OF */
-#define gpio_nand_id_table NULL
static inline int gpio_nand_get_config_of(const struct device *dev,
struct gpio_nand_platdata *plat)
{
@@ -259,8 +258,6 @@ static int gpio_nand_remove(struct platform_device *dev)
if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
gpio_free(gpiomtd->plat.gpio_rdy);
- kfree(gpiomtd);
-
return 0;
}
@@ -297,7 +294,7 @@ static int gpio_nand_probe(struct platform_device *dev)
if (!res0)
return -EINVAL;
- gpiomtd = kzalloc(sizeof(*gpiomtd), GFP_KERNEL);
+ gpiomtd = devm_kzalloc(&dev->dev, sizeof(*gpiomtd), GFP_KERNEL);
if (gpiomtd == NULL) {
dev_err(&dev->dev, "failed to create NAND MTD\n");
return -ENOMEM;
@@ -412,7 +409,6 @@ err_sync:
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
release_mem_region(res0->start, resource_size(res0));
err_map:
- kfree(gpiomtd);
return ret;
}
@@ -421,7 +417,7 @@ static struct platform_driver gpio_nand_driver = {
.remove = gpio_nand_remove,
.driver = {
.name = "gpio-nand",
- .of_match_table = gpio_nand_id_table,
+ .of_match_table = of_match_ptr(gpio_nand_id_table),
},
};
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
deleted file mode 100644
index 50166e9..0000000
--- a/drivers/mtd/nand/h1910.c
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * drivers/mtd/nand/h1910.c
- *
- * Copyright (C) 2003 Joshua Wise (joshua@joshuawise.com)
- *
- * Derived from drivers/mtd/nand/edb7312.c
- * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * iPAQ h1910 board which utilizes the Samsung K9F2808 part. This is
- * a 128Mibit (16MiB x 8 bits) NAND flash device.
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/sizes.h>
-#include <mach/h1900-gpio.h>
-#include <mach/ipaq.h>
-
-/*
- * MTD structure for EDB7312 board
- */
-static struct mtd_info *h1910_nand_mtd = NULL;
-
-/*
- * Module stuff
- */
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info[] = {
- {name:"h1910 NAND Flash",
- offset:0,
- size:16 * 1024 * 1024}
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- * hardware specific access to control-lines
- *
- * NAND_NCE: bit 0 - don't care
- * NAND_CLE: bit 1 - address bit 2
- * NAND_ALE: bit 2 - address bit 3
- */
-static void h1910_hwcontrol(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W | ((ctrl & 0x6) << 1));
-}
-
-/*
- * read device ready pin
- */
-#if 0
-static int h1910_device_ready(struct mtd_info *mtd)
-{
- return (GPLR(55) & GPIO_bit(55));
-}
-#endif
-
-/*
- * Main initialization routine
- */
-static int __init h1910_init(void)
-{
- struct nand_chip *this;
- void __iomem *nandaddr;
-
- if (!machine_is_h1900())
- return -ENODEV;
-
- nandaddr = ioremap(0x08000000, 0x1000);
- if (!nandaddr) {
- printk("Failed to ioremap nand flash.\n");
- return -ENOMEM;
- }
-
- /* Allocate memory for MTD device structure and private data */
- h1910_nand_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!h1910_nand_mtd) {
- printk("Unable to allocate h1910 NAND MTD device structure.\n");
- iounmap((void *)nandaddr);
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&h1910_nand_mtd[1]);
-
- /* Initialize structures */
- memset(h1910_nand_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- h1910_nand_mtd->priv = this;
- h1910_nand_mtd->owner = THIS_MODULE;
-
- /*
- * Enable VPEN
- */
- GPSR(37) = GPIO_bit(37);
-
- /* insert callbacks */
- this->IO_ADDR_R = nandaddr;
- this->IO_ADDR_W = nandaddr;
- this->cmd_ctrl = h1910_hwcontrol;
- this->dev_ready = NULL; /* unknown whether that was correct or not so we will just do it like this */
- /* 15 us command delay time */
- this->chip_delay = 50;
- this->ecc.mode = NAND_ECC_SOFT;
-
- /* Scan to find existence of the device */
- if (nand_scan(h1910_nand_mtd, 1)) {
- printk(KERN_NOTICE "No NAND device - returning -ENXIO\n");
- kfree(h1910_nand_mtd);
- iounmap((void *)nandaddr);
- return -ENXIO;
- }
-
- /* Register the partitions */
- mtd_device_parse_register(h1910_nand_mtd, NULL, NULL, partition_info,
- NUM_PARTITIONS);
-
- /* Return happy */
- return 0;
-}
-
-module_init(h1910_init);
-
-/*
- * Clean up routine
- */
-static void __exit h1910_cleanup(void)
-{
- struct nand_chip *this = (struct nand_chip *)&h1910_nand_mtd[1];
-
- /* Release resources, unregister device */
- nand_release(h1910_nand_mtd);
-
- /* Release io resource */
- iounmap((void *)this->IO_ADDR_W);
-
- /* Free the MTD device structure */
- kfree(h1910_nand_mtd);
-}
-
-module_exit(h1910_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joshua Wise <joshua at joshuawise dot com>");
-MODULE_DESCRIPTION("NAND flash driver for iPAQ h1910");
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 0ca22ae..a94facb 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -540,8 +540,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
}
static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page,
- int cached, int raw)
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw)
{
int res;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 42c6392..dfcd0a5 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4,7 +4,6 @@
* Overview:
* This is the generic MTD driver for NAND flash devices. It should be
* capable of working with almost all NAND chips currently available.
- * Basic support for AG-AND chips is provided.
*
* Additional technical information is available on
* http://www.linux-mtd.infradead.org/doc/nand.html
@@ -22,8 +21,6 @@
* Enable cached programming for 2k page size chips
* Check, if mtd->ecctype should be set to MTD_ECC_HW
* if we have HW ECC support.
- * The AG-AND chips have nice features for speed improvement,
- * which are not supported yet. Read / program 4 pages in one go.
* BBT table is not serialized, has to be fixed
*
* This program is free software; you can redistribute it and/or modify
@@ -515,7 +512,7 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
* @page_addr: the page address for this command, -1 if none
*
* Send command to NAND device. This function is used for small page devices
- * (256/512 Bytes per page).
+ * (512 Bytes per page).
*/
static void nand_command(struct mtd_info *mtd, unsigned int command,
int column, int page_addr)
@@ -631,8 +628,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
/* Command latch cycle */
- chip->cmd_ctrl(mtd, command & 0xff,
- NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (column != -1 || page_addr != -1) {
int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -671,16 +667,6 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
case NAND_CMD_SEQIN:
case NAND_CMD_RNDIN:
case NAND_CMD_STATUS:
- case NAND_CMD_DEPLETE1:
- return;
-
- case NAND_CMD_STATUS_ERROR:
- case NAND_CMD_STATUS_ERROR0:
- case NAND_CMD_STATUS_ERROR1:
- case NAND_CMD_STATUS_ERROR2:
- case NAND_CMD_STATUS_ERROR3:
- /* Read error status commands require only a short delay */
- udelay(chip->chip_delay);
return;
case NAND_CMD_RESET:
@@ -836,10 +822,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
*/
ndelay(100);
- if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
- chip->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1);
- else
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
if (in_interrupt() || oops_in_progress)
panic_nand_wait(mtd, chip, timeo);
@@ -1127,7 +1110,7 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
- * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function
+ * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
* @mtd: mtd info structure
* @chip: nand chip info structure
* @data_offs: offset of requested data within the page
@@ -1995,6 +1978,67 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
return 0;
}
+
+/**
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @oob_required: must write chip->oob_poi to OOB
+ */
+static int nand_write_subpage_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *data_buf,
+ int oob_required)
+{
+ uint8_t *oob_buf = chip->oob_poi;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint32_t start_step = offset / ecc_size;
+ uint32_t end_step = (offset + data_len - 1) / ecc_size;
+ int oob_bytes = mtd->oobsize / ecc_steps;
+ int step, i;
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* configure controller for WRITE access */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* write data (untouched subpages already masked by 0xFF) */
+ chip->write_buf(mtd, data_buf, ecc_size);
+
+ /* mask ECC of un-touched subpages by padding 0xFF */
+ if ((step < start_step) || (step > end_step))
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ chip->ecc.calculate(mtd, data_buf, ecc_calc);
+
+ /* mask OOB of un-touched subpages by padding 0xFF */
+ /* if oob_required, preserve OOB metadata of written subpage */
+ if (!oob_required || (step < start_step) || (step > end_step))
+ memset(oob_buf, 0xff, oob_bytes);
+
+ data_buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ oob_buf += oob_bytes;
+ }
+
+ /* copy the calculated ECC for the whole page into chip->oob_poi; */
+ /* this includes the 0xFF-masked values for unwritten subpages */
+ ecc_calc = chip->buffers->ecccalc;
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ /* write OOB buffer to NAND device */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+
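/*
 * Editor's sketch (not part of the patch): how the step range above selects
 * which ECC steps carry caller data.  Standalone arithmetic only; the names
 * below are illustrative, not kernel API.
 */
#include <stdint.h>

static inline void ecc_step_range(uint32_t offset, uint32_t data_len,
				  uint32_t ecc_size,
				  uint32_t *start_step, uint32_t *end_step)
{
	*start_step = offset / ecc_size;		/* first step written */
	*end_step = (offset + data_len - 1) / ecc_size;	/* last step written */
}

/*
 * Example: a 2048-byte page with 512-byte ECC steps (4 steps per page).
 * Writing data_len = 1024 at offset = 512 gives start_step = 1 and
 * end_step = 2, so steps 0 and 3 get 0xFF-padded ECC/OOB and are left
 * untouched on the device.
 */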
/**
* nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
* @mtd: mtd info structure
@@ -2047,6 +2091,8 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
* nand_write_page - [REPLACEABLE] write one page
* @mtd: MTD device structure
* @chip: NAND chip descriptor
+ * @offset: address offset within the page
+ * @data_len: length of actual data to be written
* @buf: the data to write
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
@@ -2054,15 +2100,25 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
* @raw: use _raw version of write_page
*/
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page,
- int cached, int raw)
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw)
{
- int status;
+ int status, subpage;
+
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+ chip->ecc.write_subpage)
+ subpage = offset || (data_len < mtd->writesize);
+ else
+ subpage = 0;
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
if (unlikely(raw))
- status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
+ status = chip->ecc.write_page_raw(mtd, chip, buf,
+ oob_required);
+ else if (subpage)
+ status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
+ buf, oob_required);
else
status = chip->ecc.write_page(mtd, chip, buf, oob_required);
@@ -2075,7 +2131,7 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
*/
cached = 0;
- if (!cached || !(chip->options & NAND_CACHEPRG)) {
+ if (!cached || !NAND_HAS_CACHEPROG(chip)) {
chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
status = chip->waitfunc(mtd, chip);
@@ -2176,7 +2232,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
uint8_t *oob = ops->oobbuf;
uint8_t *buf = ops->datbuf;
- int ret, subpage;
+ int ret;
int oob_required = oob ? 1 : 0;
ops->retlen = 0;
@@ -2191,10 +2247,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
}
column = to & (mtd->writesize - 1);
- subpage = column || (writelen & (mtd->writesize - 1));
-
- if (subpage && oob)
- return -EINVAL;
chipnr = (int)(to >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -2243,9 +2295,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
/* We still need to erase leftover OOB data */
memset(chip->oob_poi, 0xff, mtd->oobsize);
}
-
- ret = chip->write_page(mtd, chip, wbuf, oob_required, page,
- cached, (ops->mode == MTD_OPS_RAW));
+ ret = chip->write_page(mtd, chip, column, bytes, wbuf,
+ oob_required, page, cached,
+ (ops->mode == MTD_OPS_RAW));
if (ret)
break;
@@ -2481,24 +2533,6 @@ static void single_erase_cmd(struct mtd_info *mtd, int page)
}
/**
- * multi_erase_cmd - [GENERIC] AND specific block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
- *
- * AND multi block erase command function. Erase 4 consecutive blocks.
- */
-static void multi_erase_cmd(struct mtd_info *mtd, int page)
-{
- struct nand_chip *chip = mtd->priv;
- /* Send commands to erase a block */
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-}
-
-/**
* nand_erase - [MTD Interface] erase block(s)
* @mtd: MTD device structure
* @instr: erase instruction
@@ -2510,7 +2544,6 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
return nand_erase_nand(mtd, instr, 0);
}
-#define BBT_PAGE_MASK 0xffffff3f
/**
* nand_erase_nand - [INTERN] erase block(s)
* @mtd: MTD device structure
@@ -2524,8 +2557,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
{
int page, status, pages_per_block, ret, chipnr;
struct nand_chip *chip = mtd->priv;
- loff_t rewrite_bbt[NAND_MAX_CHIPS] = {0};
- unsigned int bbt_masked_page = 0xffffffff;
loff_t len;
pr_debug("%s: start = 0x%012llx, len = %llu\n",
@@ -2556,15 +2587,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
goto erase_exit;
}
- /*
- * If BBT requires refresh, set the BBT page mask to see if the BBT
- * should be rewritten. Otherwise the mask is set to 0xffffffff which
- * can not be matched. This is also done when the bbt is actually
- * erased to avoid recursive updates.
- */
- if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
- bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
-
/* Loop through the pages */
len = instr->len;
@@ -2610,15 +2632,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
goto erase_exit;
}
- /*
- * If BBT requires refresh, set the BBT rewrite flag to the
- * page being erased.
- */
- if (bbt_masked_page != 0xffffffff &&
- (page & BBT_PAGE_MASK) == bbt_masked_page)
- rewrite_bbt[chipnr] =
- ((loff_t)page << chip->page_shift);
-
/* Increment page address and decrement length */
len -= (1 << chip->phys_erase_shift);
page += pages_per_block;
@@ -2628,15 +2641,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
chipnr++;
chip->select_chip(mtd, -1);
chip->select_chip(mtd, chipnr);
-
- /*
- * If BBT requires refresh and BBT-PERCHIP, set the BBT
- * page mask to see if this BBT should be rewritten.
- */
- if (bbt_masked_page != 0xffffffff &&
- (chip->bbt_td->options & NAND_BBT_PERCHIP))
- bbt_masked_page = chip->bbt_td->pages[chipnr] &
- BBT_PAGE_MASK;
}
}
instr->state = MTD_ERASE_DONE;
@@ -2653,23 +2657,6 @@ erase_exit:
if (!ret)
mtd_erase_callback(instr);
- /*
- * If BBT requires refresh and erase was successful, rewrite any
- * selected bad block tables.
- */
- if (bbt_masked_page == 0xffffffff || ret)
- return ret;
-
- for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
- if (!rewrite_bbt[chipnr])
- continue;
- /* Update the BBT for chip */
- pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n",
- __func__, chipnr, rewrite_bbt[chipnr],
- chip->bbt_td->pages[chipnr]);
- nand_update_bbt(mtd, rewrite_bbt[chipnr]);
- }
-
/* Return more or less happy */
return ret;
}
@@ -2905,8 +2892,6 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
chip->onfi_version = 20;
else if (val & (1 << 1))
chip->onfi_version = 10;
- else
- chip->onfi_version = 0;
if (!chip->onfi_version) {
pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
@@ -3171,6 +3156,30 @@ static void nand_decode_bbm_options(struct mtd_info *mtd,
chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
}
+static inline bool is_full_id_nand(struct nand_flash_dev *type)
+{
+ return type->id_len;
+}
+
+static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
+ struct nand_flash_dev *type, u8 *id_data, int *busw)
+{
+ if (!strncmp(type->id, id_data, type->id_len)) {
+ mtd->writesize = type->pagesize;
+ mtd->erasesize = type->erasesize;
+ mtd->oobsize = type->oobsize;
+
+ chip->cellinfo = id_data[2];
+ chip->chipsize = (uint64_t)type->chipsize << 20;
+ chip->options |= type->options;
+
+ *busw = type->options & NAND_BUSWIDTH_16;
+
+ return true;
+ }
+ return false;
+}
+
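/*
 * Editor's sketch (not part of the patch): full-ID matching compares the
 * leading id_len bytes read from the chip against the id[] array of a
 * table entry.  Standalone illustration only; id_matches() is not kernel
 * API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool id_matches(const uint8_t *id_data, const uint8_t *table_id,
		       size_t id_len)
{
	/* prefix compare, the same idea as the strncmp() in find_full_id_nand() */
	return memcmp(id_data, table_id, id_len) == 0;
}

/*
 * Example: a chip answering 98 dc 90 26 76 15 01 08 matches the
 * "TC58NVG2S0F 4G 3.3V 8-bit" entry (id_len = 8) added to nand_ids.c later
 * in this diff, so its page, erase-block and OOB sizes come straight from
 * that entry instead of the extended-ID heuristics.
 */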
/*
* Get the flash and manufacturer id and lookup if the type is supported.
*/
@@ -3222,9 +3231,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (!type)
type = nand_flash_ids;
- for (; type->name != NULL; type++)
- if (*dev_id == type->id)
- break;
+ for (; type->name != NULL; type++) {
+ if (is_full_id_nand(type)) {
+ if (find_full_id_nand(mtd, chip, type, id_data, &busw))
+ goto ident_done;
+ } else if (*dev_id == type->dev_id) {
+ break;
+ }
+ }
chip->onfi_version = 0;
if (!type->name || !type->pagesize) {
@@ -3302,12 +3316,7 @@ ident_done:
}
chip->badblockbits = 8;
-
- /* Check for AND chips with 4 page planes */
- if (chip->options & NAND_4PAGE_ARRAY)
- chip->erase_cmd = multi_erase_cmd;
- else
- chip->erase_cmd = single_erase_cmd;
+ chip->erase_cmd = single_erase_cmd;
/* Do not replace user supplied command function! */
if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
@@ -3474,6 +3483,10 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->ecc.read_oob = nand_read_oob_std;
if (!chip->ecc.write_oob)
chip->ecc.write_oob = nand_write_oob_std;
+ if (!chip->ecc.read_subpage)
+ chip->ecc.read_subpage = nand_read_subpage;
+ if (!chip->ecc.write_subpage)
+ chip->ecc.write_subpage = nand_write_subpage_hwecc;
case NAND_ECC_HW_SYNDROME:
if ((!chip->ecc.calculate || !chip->ecc.correct ||
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 916d6e9..2672643 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1240,15 +1240,6 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
*/
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
-static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
-
-static struct nand_bbt_descr agand_flashbased = {
- .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
- .offs = 0x20,
- .len = 6,
- .pattern = scan_agand_pattern
-};
-
/* Generic flash bbt descriptors */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
@@ -1333,22 +1324,6 @@ int nand_default_bbt(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
- /*
- * Default for AG-AND. We must use a flash based bad block table as the
- * devices have factory marked _good_ blocks. Erasing those blocks
- * leads to loss of the good / bad information, so we _must_ store this
- * information in a good / bad table during startup.
- */
- if (this->options & NAND_IS_AND) {
- /* Use the default pattern descriptors */
- if (!this->bbt_td) {
- this->bbt_td = &bbt_main_descr;
- this->bbt_md = &bbt_mirror_descr;
- }
- this->bbt_options |= NAND_BBT_USE_FLASH;
- return nand_scan_bbt(mtd, &agand_flashbased);
- }
-
/* Is a flash based bad block table requested? */
if (this->bbt_options & NAND_BBT_USE_FLASH) {
/* Use the default pattern descriptors */
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 9c61238..683813a 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -10,163 +10,153 @@
*/
#include <linux/module.h>
#include <linux/mtd/nand.h>
-/*
-* Chip ID list
-*
-* Name. ID code, pagesize, chipsize in MegaByte, eraseblock size,
-* options
-*
-* Pagesize; 0, 256, 512
-* 0 get this information from the extended chip ID
-+ 256 256 Byte page size
-* 512 512 Byte page size
-*/
-struct nand_flash_dev nand_flash_ids[] = {
+#include <linux/sizes.h>
+
+#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
+#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
+
#define SP_OPTIONS NAND_NEED_READRDY
#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
-#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
- {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, SP_OPTIONS},
- {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, SP_OPTIONS},
- {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, SP_OPTIONS},
- {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, SP_OPTIONS},
- {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, SP_OPTIONS},
- {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, SP_OPTIONS},
- {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, SP_OPTIONS},
- {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, SP_OPTIONS},
- {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, SP_OPTIONS},
- {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, SP_OPTIONS},
-
- {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, SP_OPTIONS},
- {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, SP_OPTIONS},
- {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, SP_OPTIONS16},
- {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, SP_OPTIONS16},
-#endif
-
- {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, SP_OPTIONS},
- {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, SP_OPTIONS},
- {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, SP_OPTIONS16},
- {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, SP_OPTIONS16},
-
- {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, SP_OPTIONS},
- {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, SP_OPTIONS},
- {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, SP_OPTIONS16},
- {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, SP_OPTIONS16},
-
- {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, SP_OPTIONS},
- {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, SP_OPTIONS},
- {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, SP_OPTIONS16},
- {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, SP_OPTIONS16},
-
- {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, SP_OPTIONS},
- {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, SP_OPTIONS},
- {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, SP_OPTIONS},
- {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, SP_OPTIONS16},
- {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, SP_OPTIONS16},
- {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, SP_OPTIONS16},
- {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, SP_OPTIONS16},
-
- {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, SP_OPTIONS},
+/*
+ * The chip ID list:
+ * name, device ID, page size, chip size in MiB, eraseblock size, options
+ *
+ * If page size and eraseblock size are 0, the sizes are taken from the
+ * extended chip ID.
+ */
+struct nand_flash_dev nand_flash_ids[] = {
+ /*
+	 * Some incompatible NAND chips share device IDs and so must be
+ * listed by full ID. We list them first so that we can easily identify
+ * the most specific match.
+ */
+ {"TC58NVG2S0F 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 224},
+ {"TC58NVG3S0F 8G 3.3V 8-bit",
+ { .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
+ SZ_4K, SZ_1K, SZ_256K, 0, 8, 232},
+ {"TC58NVG5D2 32G 3.3V 8-bit",
+ { .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
+ SZ_8K, SZ_4K, SZ_1M, 0, 8, 640},
+ {"TC58NVG6D2 64G 3.3V 8-bit",
+ { .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
+ SZ_8K, SZ_8K, SZ_2M, 0, 8, 640},
+
+ LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),
+
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit", 0x33, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit", 0x73, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit", 0x35, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit", 0x36, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x78, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x39, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit", 0x79, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),
/*
- * These are the new chips with large page size. The pagesize and the
- * erasesize is determined from the extended id bytes
+ * These are the new chips with large page size. Their page size and
+ * eraseblock size are determined from the extended ID bytes.
*/
-#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
-#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
/* 512 Megabit */
- {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 3,3V 8-bit", 0xF0, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16},
- {"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16},
- {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16},
- {"NAND 64MiB 3,3V 16-bit", 0xC0, 0, 64, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xD0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0, 64, LP_OPTIONS16),
/* 1 Gigabit */
- {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS},
- {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS},
- {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS},
- {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16},
- {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16},
- {"NAND 128MiB 1,8V 16-bit", 0xAD, 0, 128, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit", 0xA1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xF1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xD1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),
/* 2 Gigabit */
- {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, LP_OPTIONS},
- {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, LP_OPTIONS},
- {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, LP_OPTIONS16},
- {"NAND 256MiB 3,3V 16-bit", 0xCA, 0, 256, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit", 0xAA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit", 0xDA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),
/* 4 Gigabit */
- {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, LP_OPTIONS},
- {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, LP_OPTIONS},
- {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, LP_OPTIONS16},
- {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit", 0xAC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit", 0xDC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),
/* 8 Gigabit */
- {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, LP_OPTIONS},
- {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 1024, 0, LP_OPTIONS},
- {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, LP_OPTIONS16},
- {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit", 0xA3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit", 0xD3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
/* 16 Gigabit */
- {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, LP_OPTIONS},
- {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, LP_OPTIONS},
- {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, LP_OPTIONS16},
- {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit", 0xA5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit", 0xD5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),
/* 32 Gigabit */
- {"NAND 4GiB 1,8V 8-bit", 0xA7, 0, 4096, 0, LP_OPTIONS},
- {"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS},
- {"NAND 4GiB 1,8V 16-bit", 0xB7, 0, 4096, 0, LP_OPTIONS16},
- {"NAND 4GiB 3,3V 16-bit", 0xC7, 0, 4096, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit", 0xA7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit", 0xD7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
/* 64 Gigabit */
- {"NAND 8GiB 1,8V 8-bit", 0xAE, 0, 8192, 0, LP_OPTIONS},
- {"NAND 8GiB 3,3V 8-bit", 0xDE, 0, 8192, 0, LP_OPTIONS},
- {"NAND 8GiB 1,8V 16-bit", 0xBE, 0, 8192, 0, LP_OPTIONS16},
- {"NAND 8GiB 3,3V 16-bit", 0xCE, 0, 8192, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit", 0xAE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit", 0xDE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),
/* 128 Gigabit */
- {"NAND 16GiB 1,8V 8-bit", 0x1A, 0, 16384, 0, LP_OPTIONS},
- {"NAND 16GiB 3,3V 8-bit", 0x3A, 0, 16384, 0, LP_OPTIONS},
- {"NAND 16GiB 1,8V 16-bit", 0x2A, 0, 16384, 0, LP_OPTIONS16},
- {"NAND 16GiB 3,3V 16-bit", 0x4A, 0, 16384, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit", 0x1A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit", 0x3A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),
/* 256 Gigabit */
- {"NAND 32GiB 1,8V 8-bit", 0x1C, 0, 32768, 0, LP_OPTIONS},
- {"NAND 32GiB 3,3V 8-bit", 0x3C, 0, 32768, 0, LP_OPTIONS},
- {"NAND 32GiB 1,8V 16-bit", 0x2C, 0, 32768, 0, LP_OPTIONS16},
- {"NAND 32GiB 3,3V 16-bit", 0x4C, 0, 32768, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit", 0x1C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit", 0x3C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),
/* 512 Gigabit */
- {"NAND 64GiB 1,8V 8-bit", 0x1E, 0, 65536, 0, LP_OPTIONS},
- {"NAND 64GiB 3,3V 8-bit", 0x3E, 0, 65536, 0, LP_OPTIONS},
- {"NAND 64GiB 1,8V 16-bit", 0x2E, 0, 65536, 0, LP_OPTIONS16},
- {"NAND 64GiB 3,3V 16-bit", 0x4E, 0, 65536, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit", 0x1E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit", 0x3E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),
- /*
- * Renesas AND 1 Gigabit. Those chips do not support extended id and
- * have a strange page/block layout ! The chosen minimum erasesize is
- * 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page
- * planes 1 block = 2 pages, but due to plane arrangement the blocks
- * 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 Anyway JFFS2 would
- * increase the eraseblock size so we chose a combined one which can be
- * erased in one go There are more speed improvements for reads and
- * writes possible, but not implemented now
- */
- {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
- NAND_IS_AND | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
-
- {NULL,}
+ {NULL}
};
-/*
-* Manufacturer ID list
-*/
+/* Manufacturer IDs */
struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_TOSHIBA, "Toshiba"},
{NAND_MFR_SAMSUNG, "Samsung"},
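The table entries above depend on the reworked struct nand_flash_dev and the two helper macros declared in include/linux/mtd/nand.h. As an aid to reading the positional initializers (page size, chip size in MiB, eraseblock size, options, id_len, OOB size), their shape is approximately:

    /* Approximate layout of the reworked table entry. */
    struct nand_flash_dev {
            char *name;
            union {
                    struct {
                            uint8_t mfr_id;
                            uint8_t dev_id;
                    };
                    uint8_t id[NAND_MAX_ID_LEN];
            };
            unsigned int pagesize;
            unsigned int chipsize;  /* MiB */
            unsigned int erasesize;
            unsigned int options;
            uint16_t id_len;        /* nonzero => full-ID entry */
            uint16_t oobsize;
    };

    /* Legacy small-page parts: one device ID byte, fixed 512-byte pages. */
    #define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts)           \
            { .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512,  \
              .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }

    /* Large-page parts: geometry is decoded from the extended ID bytes. */
    #define EXTENDED_ID_NAND(nm, devid, chipsz, opts)                       \
            { .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz),  \
              .options = (opts) }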
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 891c52a..cb38f3d 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -218,7 +218,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
#define STATE_CMD_STATUS 0x00000007 /* read status */
-#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */
#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
#define STATE_CMD_READID 0x0000000A /* read ID */
#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
@@ -263,14 +262,13 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
#define NS_OPER_STATES 6 /* Maximum number of states in operation */
#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
-#define OPT_PAGE256 0x00000001 /* 256-byte page chips */
#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
-#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
+#define OPT_SMALLPAGE (OPT_PAGE512) /* 512-byte page chips */
/* Remove action bits from state */
#define NS_STATE(x) ((x) & ~ACTION_MASK)
@@ -406,8 +404,6 @@ static struct nandsim_operations {
{OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
/* Read status */
{OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
- /* Read multi-plane status */
- {OPT_SMARTMEDIA, {STATE_CMD_STATUS_M, STATE_DATAOUT_STATUS_M, STATE_READY}},
/* Read ID */
{OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
/* Large page devices read page */
@@ -699,10 +695,7 @@ static int init_nandsim(struct mtd_info *mtd)
ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
ns->options = 0;
- if (ns->geom.pgsz == 256) {
- ns->options |= OPT_PAGE256;
- }
- else if (ns->geom.pgsz == 512) {
+ if (ns->geom.pgsz == 512) {
ns->options |= OPT_PAGE512;
if (ns->busw == 8)
ns->options |= OPT_PAGE512_8BIT;
@@ -769,9 +762,9 @@ static int init_nandsim(struct mtd_info *mtd)
}
/* Detect how many ID bytes the NAND chip outputs */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (second_id_byte != nand_flash_ids[i].id)
- continue;
+ for (i = 0; nand_flash_ids[i].name != NULL; i++) {
+ if (second_id_byte != nand_flash_ids[i].dev_id)
+ continue;
}
if (ns->busw == 16)
@@ -1079,8 +1072,6 @@ static char *get_state_name(uint32_t state)
return "STATE_CMD_ERASE1";
case STATE_CMD_STATUS:
return "STATE_CMD_STATUS";
- case STATE_CMD_STATUS_M:
- return "STATE_CMD_STATUS_M";
case STATE_CMD_SEQIN:
return "STATE_CMD_SEQIN";
case STATE_CMD_READID:
@@ -1145,7 +1136,6 @@ static int check_command(int cmd)
case NAND_CMD_RNDOUTSTART:
return 0;
- case NAND_CMD_STATUS_MULTI:
default:
return 1;
}
@@ -1171,8 +1161,6 @@ static uint32_t get_state_by_command(unsigned command)
return STATE_CMD_ERASE1;
case NAND_CMD_STATUS:
return STATE_CMD_STATUS;
- case NAND_CMD_STATUS_MULTI:
- return STATE_CMD_STATUS_M;
case NAND_CMD_SEQIN:
return STATE_CMD_SEQIN;
case NAND_CMD_READID:
@@ -2306,7 +2294,7 @@ static int __init ns_init_module(void)
nand->geom.idbytes = 2;
nand->regs.status = NS_STATUS_OK(nand);
nand->nxstate = STATE_UNKNOWN;
- nand->options |= OPT_PAGE256; /* temporary value */
+ nand->options |= OPT_PAGE512; /* temporary value */
nand->ids[0] = first_id_byte;
nand->ids[1] = second_id_byte;
nand->ids[2] = third_id_byte;
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index a619119..cd6be2e 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -177,15 +177,6 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
case NAND_CMD_SEQIN:
case NAND_CMD_RNDIN:
case NAND_CMD_STATUS:
- case NAND_CMD_DEPLETE1:
- return;
-
- case NAND_CMD_STATUS_ERROR:
- case NAND_CMD_STATUS_ERROR0:
- case NAND_CMD_STATUS_ERROR1:
- case NAND_CMD_STATUS_ERROR2:
- case NAND_CMD_STATUS_ERROR3:
- udelay(chip->chip_delay);
return;
case NAND_CMD_RESET:
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 8e820dd..81b80af 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1023,9 +1023,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
int status, state = this->state;
if (state == FL_ERASING)
- timeo += (HZ * 400) / 1000;
+ timeo += msecs_to_jiffies(400);
else
- timeo += (HZ * 20) / 1000;
+ timeo += msecs_to_jiffies(20);
writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
while (time_before(jiffies, timeo)) {
@@ -1701,8 +1701,9 @@ static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
elm_node = of_find_node_by_phandle(be32_to_cpup(parp));
pdev = of_find_device_by_node(elm_node);
info->elm_dev = &pdev->dev;
- elm_config(info->elm_dev, bch_type);
- info->is_elm_used = true;
+
+ if (elm_config(info->elm_dev, bch_type) == 0)
+ info->is_elm_used = true;
}
if (info->is_elm_used && (mtd->writesize <= 4096)) {
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index cd72b92..8fbd002 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -231,18 +231,7 @@ static struct platform_driver orion_nand_driver = {
},
};
-static int __init orion_nand_init(void)
-{
- return platform_driver_probe(&orion_nand_driver, orion_nand_probe);
-}
-
-static void __exit orion_nand_exit(void)
-{
- platform_driver_unregister(&orion_nand_driver);
-}
-
-module_init(orion_nand_init);
-module_exit(orion_nand_exit);
+module_platform_driver_probe(orion_nand_driver, orion_nand_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tzachi Perelstein");
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
deleted file mode 100644
index 0ddd90e..0000000
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * drivers/mtd/nand/ppchameleonevb.c
- *
- * Copyright (C) 2003 DAVE Srl (info@wawnet.biz)
- *
- * Derived from drivers/mtd/nand/edb7312.c
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash devices found on the
- * PPChameleon/PPChameleonEVB system.
- * PPChameleon options (autodetected):
- * - BA model: no NAND
- * - ME model: 32MB (Samsung K9F5608U0B)
- * - HI model: 128MB (Samsung K9F1G08UOM)
- * PPChameleonEVB options:
- * - 32MB (Samsung K9F5608U0B)
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <platforms/PPChameleonEVB.h>
-
-#undef USE_READY_BUSY_PIN
-#define USE_READY_BUSY_PIN
-/* see datasheets (tR) */
-#define NAND_BIG_DELAY_US 25
-#define NAND_SMALL_DELAY_US 10
-
-/* handy sizes */
-#define SZ_4M 0x00400000
-#define NAND_SMALL_SIZE 0x02000000
-#define NAND_MTD_NAME "ppchameleon-nand"
-#define NAND_EVB_MTD_NAME "ppchameleonevb-nand"
-
-/* GPIO pins used to drive NAND chip mounted on processor module */
-#define NAND_nCE_GPIO_PIN (0x80000000 >> 1)
-#define NAND_CLE_GPIO_PIN (0x80000000 >> 2)
-#define NAND_ALE_GPIO_PIN (0x80000000 >> 3)
-#define NAND_RB_GPIO_PIN (0x80000000 >> 4)
-/* GPIO pins used to drive NAND chip mounted on EVB */
-#define NAND_EVB_nCE_GPIO_PIN (0x80000000 >> 14)
-#define NAND_EVB_CLE_GPIO_PIN (0x80000000 >> 15)
-#define NAND_EVB_ALE_GPIO_PIN (0x80000000 >> 16)
-#define NAND_EVB_RB_GPIO_PIN (0x80000000 >> 31)
-
-/*
- * MTD structure for PPChameleonEVB board
- */
-static struct mtd_info *ppchameleon_mtd = NULL;
-static struct mtd_info *ppchameleonevb_mtd = NULL;
-
-/*
- * Module stuff
- */
-static unsigned long ppchameleon_fio_pbase = CFG_NAND0_PADDR;
-static unsigned long ppchameleonevb_fio_pbase = CFG_NAND1_PADDR;
-
-#ifdef MODULE
-module_param(ppchameleon_fio_pbase, ulong, 0);
-module_param(ppchameleonevb_fio_pbase, ulong, 0);
-#else
-__setup("ppchameleon_fio_pbase=", ppchameleon_fio_pbase);
-__setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase);
-#endif
-
-/*
- * Define static partitions for flash devices
- */
-static struct mtd_partition partition_info_hi[] = {
- { .name = "PPChameleon HI Nand Flash",
- .offset = 0,
- .size = 128 * 1024 * 1024
- }
-};
-
-static struct mtd_partition partition_info_me[] = {
- { .name = "PPChameleon ME Nand Flash",
- .offset = 0,
- .size = 32 * 1024 * 1024
- }
-};
-
-static struct mtd_partition partition_info_evb[] = {
- { .name = "PPChameleonEVB Nand Flash",
- .offset = 0,
- .size = 32 * 1024 * 1024
- }
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- * hardware specific access to control-lines
- */
-static void ppchameleon_hwcontrol(struct mtd_info *mtdinfo, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
-#error Missing headerfiles. No way to fix this. -tglx
- switch (cmd) {
- case NAND_CTL_SETCLE:
- MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_CLRCLE:
- MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_SETALE:
- MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_CLRALE:
- MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_SETNCE:
- MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_CLRNCE:
- MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND0_PADDR);
- break;
- }
- }
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
-#error Missing headerfiles. No way to fix this. -tglx
- switch (cmd) {
- case NAND_CTL_SETCLE:
- MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_CLRCLE:
- MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_SETALE:
- MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_CLRALE:
- MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_SETNCE:
- MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_CLRNCE:
- MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND1_PADDR);
- break;
- }
- }
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-#ifdef USE_READY_BUSY_PIN
-/*
- * read device ready pin
- */
-static int ppchameleon_device_ready(struct mtd_info *minfo)
-{
- if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_RB_GPIO_PIN)
- return 1;
- return 0;
-}
-
-static int ppchameleonevb_device_ready(struct mtd_info *minfo)
-{
- if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_EVB_RB_GPIO_PIN)
- return 1;
- return 0;
-}
-#endif
-
-/*
- * Main initialization routine
- */
-static int __init ppchameleonevb_init(void)
-{
- struct nand_chip *this;
- void __iomem *ppchameleon_fio_base;
- void __iomem *ppchameleonevb_fio_base;
-
- /*********************************
- * Processor module NAND (if any) *
- *********************************/
- /* Allocate memory for MTD device structure and private data */
- ppchameleon_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!ppchameleon_mtd) {
- printk("Unable to allocate PPChameleon NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* map physical address */
- ppchameleon_fio_base = ioremap(ppchameleon_fio_pbase, SZ_4M);
- if (!ppchameleon_fio_base) {
- printk("ioremap PPChameleon NAND flash failed\n");
- kfree(ppchameleon_mtd);
- return -EIO;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&ppchameleon_mtd[1]);
-
- /* Initialize structures */
- memset(ppchameleon_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ppchameleon_mtd->priv = this;
- ppchameleon_mtd->owner = THIS_MODULE;
-
- /* Initialize GPIOs */
- /* Pin mapping for NAND chip */
- /*
- CE GPIO_01
- CLE GPIO_02
- ALE GPIO_03
- R/B GPIO_04
- */
- /* output select */
- out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xC0FFFFFF);
- /* three-state select */
- out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xC0FFFFFF);
- /* enable output driver */
- out_be32((volatile unsigned *)GPIO0_TCR,
- in_be32((volatile unsigned *)GPIO0_TCR) | NAND_nCE_GPIO_PIN | NAND_CLE_GPIO_PIN | NAND_ALE_GPIO_PIN);
-#ifdef USE_READY_BUSY_PIN
- /* three-state select */
- out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFF3FFFFF);
- /* high-impedecence */
- out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_RB_GPIO_PIN));
- /* input select */
- out_be32((volatile unsigned *)GPIO0_ISR1H,
- (in_be32((volatile unsigned *)GPIO0_ISR1H) & 0xFF3FFFFF) | 0x00400000);
-#endif
-
- /* insert callbacks */
- this->IO_ADDR_R = ppchameleon_fio_base;
- this->IO_ADDR_W = ppchameleon_fio_base;
- this->cmd_ctrl = ppchameleon_hwcontrol;
-#ifdef USE_READY_BUSY_PIN
- this->dev_ready = ppchameleon_device_ready;
-#endif
- this->chip_delay = NAND_BIG_DELAY_US;
- /* ECC mode */
- this->ecc.mode = NAND_ECC_SOFT;
-
- /* Scan to find existence of the device (it could not be mounted) */
- if (nand_scan(ppchameleon_mtd, 1)) {
- iounmap((void *)ppchameleon_fio_base);
- ppchameleon_fio_base = NULL;
- kfree(ppchameleon_mtd);
- goto nand_evb_init;
- }
-#ifndef USE_READY_BUSY_PIN
- /* Adjust delay if necessary */
- if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
- this->chip_delay = NAND_SMALL_DELAY_US;
-#endif
-
- ppchameleon_mtd->name = "ppchameleon-nand";
-
- /* Register the partitions */
- mtd_device_parse_register(ppchameleon_mtd, NULL, NULL,
- ppchameleon_mtd->size == NAND_SMALL_SIZE ?
- partition_info_me : partition_info_hi,
- NUM_PARTITIONS);
-
- nand_evb_init:
- /****************************
- * EVB NAND (always present) *
- ****************************/
- /* Allocate memory for MTD device structure and private data */
- ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!ppchameleonevb_mtd) {
- printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n");
- if (ppchameleon_fio_base)
- iounmap(ppchameleon_fio_base);
- return -ENOMEM;
- }
-
- /* map physical address */
- ppchameleonevb_fio_base = ioremap(ppchameleonevb_fio_pbase, SZ_4M);
- if (!ppchameleonevb_fio_base) {
- printk("ioremap PPChameleonEVB NAND flash failed\n");
- kfree(ppchameleonevb_mtd);
- if (ppchameleon_fio_base)
- iounmap(ppchameleon_fio_base);
- return -EIO;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&ppchameleonevb_mtd[1]);
-
- /* Initialize structures */
- memset(ppchameleonevb_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ppchameleonevb_mtd->priv = this;
-
- /* Initialize GPIOs */
- /* Pin mapping for NAND chip */
- /*
- CE GPIO_14
- CLE GPIO_15
- ALE GPIO_16
- R/B GPIO_31
- */
- /* output select */
- out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xFFFFFFF0);
- out_be32((volatile unsigned *)GPIO0_OSRL, in_be32((volatile unsigned *)GPIO0_OSRL) & 0x3FFFFFFF);
- /* three-state select */
- out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFFFFFFF0);
- out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0x3FFFFFFF);
- /* enable output driver */
- out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) | NAND_EVB_nCE_GPIO_PIN |
- NAND_EVB_CLE_GPIO_PIN | NAND_EVB_ALE_GPIO_PIN);
-#ifdef USE_READY_BUSY_PIN
- /* three-state select */
- out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0xFFFFFFFC);
- /* high-impedecence */
- out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_EVB_RB_GPIO_PIN));
- /* input select */
- out_be32((volatile unsigned *)GPIO0_ISR1L,
- (in_be32((volatile unsigned *)GPIO0_ISR1L) & 0xFFFFFFFC) | 0x00000001);
-#endif
-
- /* insert callbacks */
- this->IO_ADDR_R = ppchameleonevb_fio_base;
- this->IO_ADDR_W = ppchameleonevb_fio_base;
- this->cmd_ctrl = ppchameleonevb_hwcontrol;
-#ifdef USE_READY_BUSY_PIN
- this->dev_ready = ppchameleonevb_device_ready;
-#endif
- this->chip_delay = NAND_SMALL_DELAY_US;
-
- /* ECC mode */
- this->ecc.mode = NAND_ECC_SOFT;
-
- /* Scan to find existence of the device */
- if (nand_scan(ppchameleonevb_mtd, 1)) {
- iounmap((void *)ppchameleonevb_fio_base);
- kfree(ppchameleonevb_mtd);
- if (ppchameleon_fio_base)
- iounmap(ppchameleon_fio_base);
- return -ENXIO;
- }
-
- ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
-
- /* Register the partitions */
- mtd_device_parse_register(ppchameleonevb_mtd, NULL, NULL,
- ppchameleon_mtd->size == NAND_SMALL_SIZE ?
- partition_info_me : partition_info_hi,
- NUM_PARTITIONS);
-
- /* Return happy */
- return 0;
-}
-
-module_init(ppchameleonevb_init);
-
-/*
- * Clean up routine
- */
-static void __exit ppchameleonevb_cleanup(void)
-{
- struct nand_chip *this;
-
- /* Release resources, unregister device(s) */
- nand_release(ppchameleon_mtd);
- nand_release(ppchameleonevb_mtd);
-
- /* Release iomaps */
- this = (struct nand_chip *) &ppchameleon_mtd[1];
- iounmap((void *) this->IO_ADDR_R);
- this = (struct nand_chip *) &ppchameleonevb_mtd[1];
- iounmap((void *) this->IO_ADDR_R);
-
- /* Free the MTD device structure */
- kfree (ppchameleon_mtd);
- kfree (ppchameleonevb_mtd);
-}
-module_exit(ppchameleonevb_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("DAVE Srl <support-ppchameleon@dave-tech.it>");
-MODULE_DESCRIPTION("MTD map driver for DAVE Srl PPChameleonEVB board");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 37ee75c..dec80ca 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -989,7 +989,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
}
pxa3xx_flash_ids[0].name = f->name;
- pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
+ pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
pxa3xx_flash_ids[0].pagesize = f->page_size;
chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
deleted file mode 100644
index e55b5cf..0000000
--- a/drivers/mtd/nand/rtc_from4.c
+++ /dev/null
@@ -1,624 +0,0 @@
-/*
- * drivers/mtd/nand/rtc_from4.c
- *
- * Copyright (C) 2004 Red Hat, Inc.
- *
- * Derived from drivers/mtd/nand/spia.c
- * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the AG-AND flash device found on the
- * Renesas Technology Corp. Flash ROM 4-slot interface board (FROM_BOARD4),
- * which utilizes the Renesas HN29V1G91T-30 part.
- * This chip is a 1 GBibit (128MiB x 8 bits) AG-AND flash device.
- */
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/rslib.h>
-#include <linux/bitrev.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-
-/*
- * MTD structure for Renesas board
- */
-static struct mtd_info *rtc_from4_mtd = NULL;
-
-#define RTC_FROM4_MAX_CHIPS 2
-
-/* HS77x9 processor register defines */
-#define SH77X9_BCR1 ((volatile unsigned short *)(0xFFFFFF60))
-#define SH77X9_BCR2 ((volatile unsigned short *)(0xFFFFFF62))
-#define SH77X9_WCR1 ((volatile unsigned short *)(0xFFFFFF64))
-#define SH77X9_WCR2 ((volatile unsigned short *)(0xFFFFFF66))
-#define SH77X9_MCR ((volatile unsigned short *)(0xFFFFFF68))
-#define SH77X9_PCR ((volatile unsigned short *)(0xFFFFFF6C))
-#define SH77X9_FRQCR ((volatile unsigned short *)(0xFFFFFF80))
-
-/*
- * Values specific to the Renesas Technology Corp. FROM_BOARD4 (used with HS77x9 processor)
- */
-/* Address where flash is mapped */
-#define RTC_FROM4_FIO_BASE 0x14000000
-
-/* CLE and ALE are tied to address lines 5 & 4, respectively */
-#define RTC_FROM4_CLE (1 << 5)
-#define RTC_FROM4_ALE (1 << 4)
-
-/* address lines A24-A22 used for chip selection */
-#define RTC_FROM4_NAND_ADDR_SLOT3 (0x00800000)
-#define RTC_FROM4_NAND_ADDR_SLOT4 (0x00C00000)
-#define RTC_FROM4_NAND_ADDR_FPGA (0x01000000)
-/* mask address lines A24-A22 used for chip selection */
-#define RTC_FROM4_NAND_ADDR_MASK (RTC_FROM4_NAND_ADDR_SLOT3 | RTC_FROM4_NAND_ADDR_SLOT4 | RTC_FROM4_NAND_ADDR_FPGA)
-
-/* FPGA status register for checking device ready (bit zero) */
-#define RTC_FROM4_FPGA_SR (RTC_FROM4_NAND_ADDR_FPGA | 0x00000002)
-#define RTC_FROM4_DEVICE_READY 0x0001
-
-/* FPGA Reed-Solomon ECC Control register */
-
-#define RTC_FROM4_RS_ECC_CTL (RTC_FROM4_NAND_ADDR_FPGA | 0x00000050)
-#define RTC_FROM4_RS_ECC_CTL_CLR (1 << 7)
-#define RTC_FROM4_RS_ECC_CTL_GEN (1 << 6)
-#define RTC_FROM4_RS_ECC_CTL_FD_E (1 << 5)
-
-/* FPGA Reed-Solomon ECC code base */
-#define RTC_FROM4_RS_ECC (RTC_FROM4_NAND_ADDR_FPGA | 0x00000060)
-#define RTC_FROM4_RS_ECCN (RTC_FROM4_NAND_ADDR_FPGA | 0x00000080)
-
-/* FPGA Reed-Solomon ECC check register */
-#define RTC_FROM4_RS_ECC_CHK (RTC_FROM4_NAND_ADDR_FPGA | 0x00000070)
-#define RTC_FROM4_RS_ECC_CHK_ERROR (1 << 7)
-
-#define ERR_STAT_ECC_AVAILABLE 0x20
-
-/* Undefine for software ECC */
-#define RTC_FROM4_HWECC 1
-
-/* Define as 1 for no virtual erase blocks (in JFFS2) */
-#define RTC_FROM4_NO_VIRTBLOCKS 0
-
-/*
- * Module stuff
- */
-static void __iomem *rtc_from4_fio_base = (void *)P2SEGADDR(RTC_FROM4_FIO_BASE);
-
-static const struct mtd_partition partition_info[] = {
- {
- .name = "Renesas flash partition 1",
- .offset = 0,
- .size = MTDPART_SIZ_FULL},
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- * hardware specific flash bbt decriptors
- * Note: this is to allow debugging by disabling
- * NAND_BBT_CREATE and/or NAND_BBT_WRITE
- *
- */
-static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
-static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
-
-static struct nand_bbt_descr rtc_from4_bbt_main_descr = {
- .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
- .offs = 40,
- .len = 4,
- .veroffs = 44,
- .maxblocks = 4,
- .pattern = bbt_pattern
-};
-
-static struct nand_bbt_descr rtc_from4_bbt_mirror_descr = {
- .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
- .offs = 40,
- .len = 4,
- .veroffs = 44,
- .maxblocks = 4,
- .pattern = mirror_pattern
-};
-
-#ifdef RTC_FROM4_HWECC
-
-/* the Reed Solomon control structure */
-static struct rs_control *rs_decoder;
-
-/*
- * hardware specific Out Of Band information
- */
-static struct nand_ecclayout rtc_from4_nand_oobinfo = {
- .eccbytes = 32,
- .eccpos = {
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31},
- .oobfree = {{32, 32}}
-};
-
-#endif
-
-/*
- * rtc_from4_hwcontrol - hardware specific access to control-lines
- * @mtd: MTD device structure
- * @cmd: hardware control command
- *
- * Address lines (A5 and A4) are used to control Command and Address Latch
- * Enable on this board, so set the read/write address appropriately.
- *
- * Chip Enable is also controlled by the Chip Select (CS5) and
- * Address lines (A24-A22), so no action is required here.
- *
- */
-static void rtc_from4_hwcontrol(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = (mtd->priv);
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_CLE)
- writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_CLE);
- else
- writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_ALE);
-}
-
-/*
- * rtc_from4_nand_select_chip - hardware specific chip select
- * @mtd: MTD device structure
- * @chip: Chip to select (0 == slot 3, 1 == slot 4)
- *
- * The chip select is based on address lines A24-A22.
- * This driver uses flash slots 3 and 4 (A23-A22).
- *
- */
-static void rtc_from4_nand_select_chip(struct mtd_info *mtd, int chip)
-{
- struct nand_chip *this = mtd->priv;
-
- this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R & ~RTC_FROM4_NAND_ADDR_MASK);
- this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_NAND_ADDR_MASK);
-
- switch (chip) {
-
- case 0: /* select slot 3 chip */
- this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT3);
- this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT3);
- break;
- case 1: /* select slot 4 chip */
- this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT4);
- this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT4);
- break;
-
- }
-}
-
-/*
- * rtc_from4_nand_device_ready - hardware specific ready/busy check
- * @mtd: MTD device structure
- *
- * This board provides the Ready/Busy state in the status register
- * of the FPGA. Bit zero indicates the RDY(1)/BSY(0) signal.
- *
- */
-static int rtc_from4_nand_device_ready(struct mtd_info *mtd)
-{
- unsigned short status;
-
- status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_FPGA_SR));
-
- return (status & RTC_FROM4_DEVICE_READY);
-
-}
-
-/*
- * deplete - code to perform device recovery in case there was a power loss
- * @mtd: MTD device structure
- * @chip: Chip to select (0 == slot 3, 1 == slot 4)
- *
- * If there was a sudden loss of power during an erase operation, a
- * "device recovery" operation must be performed when power is restored
- * to ensure correct operation. This routine performs the required steps
- * for the requested chip.
- *
- * See page 86 of the data sheet for details.
- *
- */
-static void deplete(struct mtd_info *mtd, int chip)
-{
- struct nand_chip *this = mtd->priv;
-
- /* wait until device is ready */
- while (!this->dev_ready(mtd)) ;
-
- this->select_chip(mtd, chip);
-
- /* Send the commands for device recovery, phase 1 */
- this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0000);
- this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1);
-
- /* Send the commands for device recovery, phase 2 */
- this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0004);
- this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1);
-
-}
-
-#ifdef RTC_FROM4_HWECC
-/*
- * rtc_from4_enable_hwecc - hardware specific hardware ECC enable function
- * @mtd: MTD device structure
- * @mode: I/O mode; read or write
- *
- * enable hardware ECC for data read or write
- *
- */
-static void rtc_from4_enable_hwecc(struct mtd_info *mtd, int mode)
-{
- volatile unsigned short *rs_ecc_ctl = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CTL);
- unsigned short status;
-
- switch (mode) {
- case NAND_ECC_READ:
- status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_FD_E;
-
- *rs_ecc_ctl = status;
- break;
-
- case NAND_ECC_READSYN:
- status = 0x00;
-
- *rs_ecc_ctl = status;
- break;
-
- case NAND_ECC_WRITE:
- status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_GEN | RTC_FROM4_RS_ECC_CTL_FD_E;
-
- *rs_ecc_ctl = status;
- break;
-
- default:
- BUG();
- break;
- }
-
-}
-
-/*
- * rtc_from4_calculate_ecc - hardware specific code to read ECC code
- * @mtd: MTD device structure
- * @dat: buffer containing the data to generate ECC codes
- * @ecc_code ECC codes calculated
- *
- * The ECC code is calculated by the FPGA. All we have to do is read the values
- * from the FPGA registers.
- *
- * Note: We read from the inverted registers, since data is inverted before
- * the code is calculated. So all 0xff data (blank page) results in all 0xff rs code
- *
- */
-static void rtc_from4_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
-{
- volatile unsigned short *rs_eccn = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECCN);
- unsigned short value;
- int i;
-
- for (i = 0; i < 8; i++) {
- value = *rs_eccn;
- ecc_code[i] = (unsigned char)value;
- rs_eccn++;
- }
- ecc_code[7] |= 0x0f; /* set the last four bits (not used) */
-}
-
-/*
- * rtc_from4_correct_data - hardware specific code to correct data using ECC code
- * @mtd: MTD device structure
- * @buf: buffer containing the data to generate ECC codes
- * @ecc1 ECC codes read
- * @ecc2 ECC codes calculated
- *
- * The FPGA tells us fast, if there's an error or not. If no, we go back happy
- * else we read the ecc results from the fpga and call the rs library to decode
- * and hopefully correct the error.
- *
- */
-static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_char *ecc1, u_char *ecc2)
-{
- int i, j, res;
- unsigned short status;
- uint16_t par[6], syn[6];
- uint8_t ecc[8];
- volatile unsigned short *rs_ecc;
-
- status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CHK));
-
- if (!(status & RTC_FROM4_RS_ECC_CHK_ERROR)) {
- return 0;
- }
-
- /* Read the syndrome pattern from the FPGA and correct the bitorder */
- rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
- for (i = 0; i < 8; i++) {
- ecc[i] = bitrev8(*rs_ecc);
- rs_ecc++;
- }
-
- /* convert into 6 10bit syndrome fields */
- par[5] = rs_decoder->index_of[(((uint16_t) ecc[0] >> 0) & 0x0ff) | (((uint16_t) ecc[1] << 8) & 0x300)];
- par[4] = rs_decoder->index_of[(((uint16_t) ecc[1] >> 2) & 0x03f) | (((uint16_t) ecc[2] << 6) & 0x3c0)];
- par[3] = rs_decoder->index_of[(((uint16_t) ecc[2] >> 4) & 0x00f) | (((uint16_t) ecc[3] << 4) & 0x3f0)];
- par[2] = rs_decoder->index_of[(((uint16_t) ecc[3] >> 6) & 0x003) | (((uint16_t) ecc[4] << 2) & 0x3fc)];
- par[1] = rs_decoder->index_of[(((uint16_t) ecc[5] >> 0) & 0x0ff) | (((uint16_t) ecc[6] << 8) & 0x300)];
- par[0] = (((uint16_t) ecc[6] >> 2) & 0x03f) | (((uint16_t) ecc[7] << 6) & 0x3c0);
-
- /* Convert to computable syndrome */
- for (i = 0; i < 6; i++) {
- syn[i] = par[0];
- for (j = 1; j < 6; j++)
- if (par[j] != rs_decoder->nn)
- syn[i] ^= rs_decoder->alpha_to[rs_modnn(rs_decoder, par[j] + i * j)];
-
- /* Convert to index form */
- syn[i] = rs_decoder->index_of[syn[i]];
- }
-
- /* Let the library code do its magic. */
- res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL);
- if (res > 0) {
- pr_debug("rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res);
- }
- return res;
-}
-
-/**
- * rtc_from4_errstat - perform additional error status checks
- * @mtd: MTD device structure
- * @this: NAND chip structure
- * @state: state or the operation
- * @status: status code returned from read status
- * @page: startpage inside the chip, must be called with (page & this->pagemask)
- *
- * Perform additional error status checks on erase and write failures
- * to determine if errors are correctable. For this device, correctable
- * 1-bit errors on erase and write are considered acceptable.
- *
- * note: see pages 34..37 of data sheet for details.
- *
- */
-static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this,
- int state, int status, int page)
-{
- int er_stat = 0;
- int rtn, retlen;
- size_t len;
- uint8_t *buf;
- int i;
-
- this->cmdfunc(mtd, NAND_CMD_STATUS_CLEAR, -1, -1);
-
- if (state == FL_ERASING) {
-
- for (i = 0; i < 4; i++) {
- if (!(status & 1 << (i + 1)))
- continue;
- this->cmdfunc(mtd, (NAND_CMD_STATUS_ERROR + i + 1),
- -1, -1);
- rtn = this->read_byte(mtd);
- this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
-
- /* err_ecc_not_avail */
- if (!(rtn & ERR_STAT_ECC_AVAILABLE))
- er_stat |= 1 << (i + 1);
- }
-
- } else if (state == FL_WRITING) {
-
- unsigned long corrected = mtd->ecc_stats.corrected;
-
- /* single bank write logic */
- this->cmdfunc(mtd, NAND_CMD_STATUS_ERROR, -1, -1);
- rtn = this->read_byte(mtd);
- this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
-
- if (!(rtn & ERR_STAT_ECC_AVAILABLE)) {
- /* err_ecc_not_avail */
- er_stat |= 1 << 1;
- goto out;
- }
-
- len = mtd->writesize;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf) {
- er_stat = 1;
- goto out;
- }
-
- /* recovery read */
- rtn = nand_do_read(mtd, page, len, &retlen, buf);
-
- /* if read failed or > 1-bit error corrected */
- if (rtn || (mtd->ecc_stats.corrected - corrected) > 1)
- er_stat |= 1 << 1;
- kfree(buf);
- }
-out:
- rtn = status;
- if (er_stat == 0) { /* if ECC is available */
- rtn = (status & ~NAND_STATUS_FAIL); /* clear the error bit */
- }
-
- return rtn;
-}
-#endif
-
-/*
- * Main initialization routine
- */
-static int __init rtc_from4_init(void)
-{
- struct nand_chip *this;
- unsigned short bcr1, bcr2, wcr2;
- int i;
- int ret;
-
- /* Allocate memory for MTD device structure and private data */
- rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!rtc_from4_mtd) {
- printk("Unable to allocate Renesas NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&rtc_from4_mtd[1]);
-
- /* Initialize structures */
- memset(rtc_from4_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- rtc_from4_mtd->priv = this;
- rtc_from4_mtd->owner = THIS_MODULE;
-
- /* set area 5 as PCMCIA mode to clear the spec of tDH(Data hold time;9ns min) */
- bcr1 = *SH77X9_BCR1 & ~0x0002;
- bcr1 |= 0x0002;
- *SH77X9_BCR1 = bcr1;
-
- /* set */
- bcr2 = *SH77X9_BCR2 & ~0x0c00;
- bcr2 |= 0x0800;
- *SH77X9_BCR2 = bcr2;
-
- /* set area 5 wait states */
- wcr2 = *SH77X9_WCR2 & ~0x1c00;
- wcr2 |= 0x1c00;
- *SH77X9_WCR2 = wcr2;
-
- /* Set address of NAND IO lines */
- this->IO_ADDR_R = rtc_from4_fio_base;
- this->IO_ADDR_W = rtc_from4_fio_base;
- /* Set address of hardware control function */
- this->cmd_ctrl = rtc_from4_hwcontrol;
- /* Set address of chip select function */
- this->select_chip = rtc_from4_nand_select_chip;
- /* command delay time (in us) */
- this->chip_delay = 100;
- /* return the status of the Ready/Busy line */
- this->dev_ready = rtc_from4_nand_device_ready;
-
-#ifdef RTC_FROM4_HWECC
- printk(KERN_INFO "rtc_from4_init: using hardware ECC detection.\n");
-
- this->ecc.mode = NAND_ECC_HW_SYNDROME;
- this->ecc.size = 512;
- this->ecc.bytes = 8;
- this->ecc.strength = 3;
- /* return the status of extra status and ECC checks */
- this->errstat = rtc_from4_errstat;
- /* set the nand_oobinfo to support FPGA H/W error detection */
- this->ecc.layout = &rtc_from4_nand_oobinfo;
- this->ecc.hwctl = rtc_from4_enable_hwecc;
- this->ecc.calculate = rtc_from4_calculate_ecc;
- this->ecc.correct = rtc_from4_correct_data;
-
- /* We could create the decoder on demand, if memory is a concern.
- * This way we have it handy, if an error happens
- *
- * Symbolsize is 10 (bits)
- * Primitve polynomial is x^10+x^3+1
- * first consecutive root is 0
- * primitve element to generate roots = 1
- * generator polinomial degree = 6
- */
- rs_decoder = init_rs(10, 0x409, 0, 1, 6);
- if (!rs_decoder) {
- printk(KERN_ERR "Could not create a RS decoder\n");
- ret = -ENOMEM;
- goto err_1;
- }
-#else
- printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n");
-
- this->ecc.mode = NAND_ECC_SOFT;
-#endif
-
- /* set the bad block tables to support debugging */
- this->bbt_td = &rtc_from4_bbt_main_descr;
- this->bbt_md = &rtc_from4_bbt_mirror_descr;
-
- /* Scan to find existence of the device */
- if (nand_scan(rtc_from4_mtd, RTC_FROM4_MAX_CHIPS)) {
- ret = -ENXIO;
- goto err_2;
- }
-
- /* Perform 'device recovery' for each chip in case there was a power loss. */
- for (i = 0; i < this->numchips; i++) {
- deplete(rtc_from4_mtd, i);
- }
-
-#if RTC_FROM4_NO_VIRTBLOCKS
- /* use a smaller erase block to minimize wasted space when a block is bad */
- /* note: this uses eight times as much RAM as using the default and makes */
- /* mounts take four times as long. */
- rtc_from4_mtd->flags |= MTD_NO_VIRTBLOCKS;
-#endif
-
- /* Register the partitions */
- ret = mtd_device_register(rtc_from4_mtd, partition_info,
- NUM_PARTITIONS);
- if (ret)
- goto err_3;
-
- /* Return happy */
- return 0;
-err_3:
- nand_release(rtc_from4_mtd);
-err_2:
- free_rs(rs_decoder);
-err_1:
- kfree(rtc_from4_mtd);
- return ret;
-}
-
-module_init(rtc_from4_init);
-
-/*
- * Clean up routine
- */
-static void __exit rtc_from4_cleanup(void)
-{
- /* Release resource, unregister partitions */
- nand_release(rtc_from4_mtd);
-
- /* Free the MTD device structure */
- kfree(rtc_from4_mtd);
-
-#ifdef RTC_FROM4_HWECC
- /* Free the reed solomon resources */
- if (rs_decoder) {
- free_rs(rs_decoder);
- }
-#endif
-}
-
-module_exit(rtc_from4_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("d.marlin <dmarlin@redhat.com");
-MODULE_DESCRIPTION("Board-specific glue layer for AG-AND flash on Renesas FROM_BOARD4");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 57b3971..e57e18e 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -1081,7 +1081,6 @@ static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
return pdata;
}
#else /* CONFIG_OF */
-#define of_flctl_match NULL
static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
return NULL;
@@ -1219,22 +1218,11 @@ static struct platform_driver flctl_driver = {
.driver = {
.name = "sh_flctl",
.owner = THIS_MODULE,
- .of_match_table = of_flctl_match,
+ .of_match_table = of_match_ptr(of_flctl_match),
},
};
-static int __init flctl_nand_init(void)
-{
- return platform_driver_probe(&flctl_driver, flctl_probe);
-}
-
-static void __exit flctl_nand_cleanup(void)
-{
- platform_driver_unregister(&flctl_driver);
-}
-
-module_init(flctl_nand_init);
-module_exit(flctl_nand_cleanup);
+module_platform_driver_probe(flctl_driver, flctl_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 082bcdc..e8181ed 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/mtd/nand.h>
#include <linux/module.h>
+#include <linux/sizes.h>
#include "sm_common.h"
static struct nand_ecclayout nand_oob_sm = {
@@ -67,44 +68,37 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
return error;
}
-
static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
- {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0},
- {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0},
- {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0},
- {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0},
- {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0},
- {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM},
- {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0},
- {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0},
- {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0},
- {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM},
- {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0},
- {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM},
- {"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
- {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM},
- {"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
- {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM},
- {"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
- {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM},
- {"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
- {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM},
- {"SmartMedia 256MiB 3,3V", 0x71, 512, 256, 0x4000 },
- {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM},
- {NULL,}
+ LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM", 0x5d, 2, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3V", 0xe3, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V", 0xe5, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 5V", 0x6b, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM", 0xd5, 4, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 8MiB 3,3V", 0xe6, 8, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM", 0xd6, 8, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM", 0x57, 16, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM", 0x58, 32, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM", 0xd9, 64, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 256MiB 3, 3V", 0x71, 256, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM),
+ {NULL}
};
static struct nand_flash_dev nand_xd_flash_ids[] = {
-
- {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
- {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
- {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
- {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
- {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, NAND_BROKEN_XD},
- {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, NAND_BROKEN_XD},
- {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD},
- {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD},
- {NULL,}
+ LEGACY_ID_NAND("xD 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 1GiB 3,3V", 0xd3, 1024, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 2GiB 3,3V", 0xd5, 2048, SZ_16K, NAND_BROKEN_XD),
+ {NULL}
};
int sm_register_device(struct mtd_info *mtd, int smartmedia)
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index e1e8748..7ed654c 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -427,18 +427,7 @@ static struct platform_driver txx9ndfmc_driver = {
},
};
-static int __init txx9ndfmc_init(void)
-{
- return platform_driver_probe(&txx9ndfmc_driver, txx9ndfmc_probe);
-}
-
-static void __exit txx9ndfmc_exit(void)
-{
- platform_driver_unregister(&txx9ndfmc_driver);
-}
-
-module_init(txx9ndfmc_init);
-module_exit(txx9ndfmc_exit);
+module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 30bd907..553d6d6 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -55,6 +55,7 @@ static int parse_ofpart_partitions(struct mtd_info *master,
while ((pp = of_get_next_child(node, pp))) {
const __be32 *reg;
int len;
+ int a_cells, s_cells;
reg = of_get_property(pp, "reg", &len);
if (!reg) {
@@ -62,8 +63,10 @@ static int parse_ofpart_partitions(struct mtd_info *master,
continue;
}
- (*pparts)[i].offset = be32_to_cpu(reg[0]);
- (*pparts)[i].size = be32_to_cpu(reg[1]);
+ a_cells = of_n_addr_cells(pp);
+ s_cells = of_n_size_cells(pp);
+ (*pparts)[i].offset = of_read_number(reg, a_cells);
+ (*pparts)[i].size = of_read_number(reg + a_cells, s_cells);
partname = of_get_property(pp, "label", &len);
if (!partname)
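Note: the ofpart change reads the partition offset and size with of_read_number(), honouring the node's #address-cells and #size-cells instead of assuming single 32-bit cells. A minimal sketch of how such a helper folds big-endian cells into a 64-bit value (this mirrors the behaviour of the helper in <linux/of.h>, reproduced here only for illustration):

static u64 read_cells(const __be32 *cell, int size)
{
	u64 r = 0;

	/* Fold 'size' big-endian 32-bit cells into one number, most significant cell first */
	while (size--)
		r = (r << 32) | be32_to_cpu(*cell++);
	return r;
}

With a_cells = 2 and s_cells = 2, a reg entry of <0x0 0x100000 0x0 0x20000> therefore decodes to offset 0x100000 and size 0x20000.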
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 91467bb..ab26072 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -40,7 +40,6 @@ config MTD_ONENAND_SAMSUNG
config MTD_ONENAND_OTP
bool "OneNAND OTP Support"
- select HAVE_MTD_OTP
help
One Block of the NAND Flash Array memory is reserved as
a One-Time Programmable Block memory area.
@@ -68,10 +67,4 @@ config MTD_ONENAND_2X_PROGRAM
And more recent chips
-config MTD_ONENAND_SIM
- tristate "OneNAND simulator support"
- help
- The simulator may simulate various OneNAND flash chips for the
- OneNAND MTD layer.
-
endif # MTD_ONENAND
diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile
index 2b7884c..9d6540e 100644
--- a/drivers/mtd/onenand/Makefile
+++ b/drivers/mtd/onenand/Makefile
@@ -10,7 +10,4 @@ obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o
-# Simulator
-obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o
-
onenand-objs = onenand_base.o onenand_bbt.o
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index eec2aed..d98b198 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -832,19 +832,7 @@ static struct platform_driver omap2_onenand_driver = {
},
};
-static int __init omap2_onenand_init(void)
-{
- printk(KERN_INFO "OneNAND driver initializing\n");
- return platform_driver_register(&omap2_onenand_driver);
-}
-
-static void __exit omap2_onenand_exit(void)
-{
- platform_driver_unregister(&omap2_onenand_driver);
-}
-
-module_init(omap2_onenand_init);
-module_exit(omap2_onenand_exit);
+module_platform_driver(omap2_onenand_driver);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
deleted file mode 100644
index 85399e3..0000000
--- a/drivers/mtd/onenand/onenand_sim.c
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * linux/drivers/mtd/onenand/onenand_sim.c
- *
- * The OneNAND simulator
- *
- * Copyright © 2005-2007 Samsung Electronics
- * Kyungmin Park <kyungmin.park@samsung.com>
- *
- * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
- * Flex-OneNAND simulator support
- * Copyright (C) Samsung Electronics, 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/onenand.h>
-
-#include <linux/io.h>
-
-#ifndef CONFIG_ONENAND_SIM_MANUFACTURER
-#define CONFIG_ONENAND_SIM_MANUFACTURER 0xec
-#endif
-
-#ifndef CONFIG_ONENAND_SIM_DEVICE_ID
-#define CONFIG_ONENAND_SIM_DEVICE_ID 0x04
-#endif
-
-#define CONFIG_FLEXONENAND ((CONFIG_ONENAND_SIM_DEVICE_ID >> 9) & 1)
-
-#ifndef CONFIG_ONENAND_SIM_VERSION_ID
-#define CONFIG_ONENAND_SIM_VERSION_ID 0x1e
-#endif
-
-#ifndef CONFIG_ONENAND_SIM_TECHNOLOGY_ID
-#define CONFIG_ONENAND_SIM_TECHNOLOGY_ID CONFIG_FLEXONENAND
-#endif
-
-/* Initial boundary values for Flex-OneNAND Simulator */
-#ifndef CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY
-#define CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY 0x01
-#endif
-
-#ifndef CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY
-#define CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY 0x01
-#endif
-
-static int manuf_id = CONFIG_ONENAND_SIM_MANUFACTURER;
-static int device_id = CONFIG_ONENAND_SIM_DEVICE_ID;
-static int version_id = CONFIG_ONENAND_SIM_VERSION_ID;
-static int technology_id = CONFIG_ONENAND_SIM_TECHNOLOGY_ID;
-static int boundary[] = {
- CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY,
- CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY,
-};
-
-struct onenand_flash {
- void __iomem *base;
- void __iomem *data;
-};
-
-#define ONENAND_CORE(flash) (flash->data)
-#define ONENAND_CORE_SPARE(flash, this, offset) \
- ((flash->data) + (this->chipsize) + (offset >> 5))
-
-#define ONENAND_MAIN_AREA(this, offset) \
- (this->base + ONENAND_DATARAM + offset)
-
-#define ONENAND_SPARE_AREA(this, offset) \
- (this->base + ONENAND_SPARERAM + offset)
-
-#define ONENAND_GET_WP_STATUS(this) \
- (readw(this->base + ONENAND_REG_WP_STATUS))
-
-#define ONENAND_SET_WP_STATUS(v, this) \
- (writew(v, this->base + ONENAND_REG_WP_STATUS))
-
-/* It has all 0xff chars */
-#define MAX_ONENAND_PAGESIZE (4096 + 128)
-static unsigned char *ffchars;
-
-#if CONFIG_FLEXONENAND
-#define PARTITION_NAME "Flex-OneNAND simulator partition"
-#else
-#define PARTITION_NAME "OneNAND simulator partition"
-#endif
-
-static struct mtd_partition os_partitions[] = {
- {
- .name = PARTITION_NAME,
- .offset = 0,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-/*
- * OneNAND simulator mtd
- */
-struct onenand_info {
- struct mtd_info mtd;
- struct mtd_partition *parts;
- struct onenand_chip onenand;
- struct onenand_flash flash;
-};
-
-static struct onenand_info *info;
-
-#define DPRINTK(format, args...) \
-do { \
- printk(KERN_DEBUG "%s[%d]: " format "\n", __func__, \
- __LINE__, ##args); \
-} while (0)
-
-/**
- * onenand_lock_handle - Handle Lock scheme
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- *
- * Send lock command to OneNAND device.
- * The lock scheme depends on chip type.
- */
-static void onenand_lock_handle(struct onenand_chip *this, int cmd)
-{
- int block_lock_scheme;
- int status;
-
- status = ONENAND_GET_WP_STATUS(this);
- block_lock_scheme = !(this->options & ONENAND_HAS_CONT_LOCK);
-
- switch (cmd) {
- case ONENAND_CMD_UNLOCK:
- case ONENAND_CMD_UNLOCK_ALL:
- if (block_lock_scheme)
- ONENAND_SET_WP_STATUS(ONENAND_WP_US, this);
- else
- ONENAND_SET_WP_STATUS(status | ONENAND_WP_US, this);
- break;
-
- case ONENAND_CMD_LOCK:
- if (block_lock_scheme)
- ONENAND_SET_WP_STATUS(ONENAND_WP_LS, this);
- else
- ONENAND_SET_WP_STATUS(status | ONENAND_WP_LS, this);
- break;
-
- case ONENAND_CMD_LOCK_TIGHT:
- if (block_lock_scheme)
- ONENAND_SET_WP_STATUS(ONENAND_WP_LTS, this);
- else
- ONENAND_SET_WP_STATUS(status | ONENAND_WP_LTS, this);
- break;
-
- default:
- break;
- }
-}
-
-/**
- * onenand_bootram_handle - Handle BootRAM area
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- *
- * Emulate BootRAM area. It is possible to do basic operation using BootRAM.
- */
-static void onenand_bootram_handle(struct onenand_chip *this, int cmd)
-{
- switch (cmd) {
- case ONENAND_CMD_READID:
- writew(manuf_id, this->base);
- writew(device_id, this->base + 2);
- writew(version_id, this->base + 4);
- break;
-
- default:
- /* REVIST: Handle other commands */
- break;
- }
-}
-
-/**
- * onenand_update_interrupt - Set interrupt register
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- *
- * Update interrupt register. The status depends on command.
- */
-static void onenand_update_interrupt(struct onenand_chip *this, int cmd)
-{
- int interrupt = ONENAND_INT_MASTER;
-
- switch (cmd) {
- case ONENAND_CMD_READ:
- case ONENAND_CMD_READOOB:
- interrupt |= ONENAND_INT_READ;
- break;
-
- case ONENAND_CMD_PROG:
- case ONENAND_CMD_PROGOOB:
- interrupt |= ONENAND_INT_WRITE;
- break;
-
- case ONENAND_CMD_ERASE:
- interrupt |= ONENAND_INT_ERASE;
- break;
-
- case ONENAND_CMD_RESET:
- interrupt |= ONENAND_INT_RESET;
- break;
-
- default:
- break;
- }
-
- writew(interrupt, this->base + ONENAND_REG_INTERRUPT);
-}
-
-/**
- * onenand_check_overwrite - Check if over-write happened
- * @dest: The destination pointer
- * @src: The source pointer
- * @count: The length to be check
- *
- * Returns: 0 on same, otherwise 1
- *
- * Compare the source with destination
- */
-static int onenand_check_overwrite(void *dest, void *src, size_t count)
-{
- unsigned int *s = (unsigned int *) src;
- unsigned int *d = (unsigned int *) dest;
- int i;
-
- count >>= 2;
- for (i = 0; i < count; i++)
- if ((*s++ ^ *d++) != 0)
- return 1;
-
- return 0;
-}
-
-/**
- * onenand_data_handle - Handle OneNAND Core and DataRAM
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- * @dataram: Which dataram used
- * @offset: The offset to OneNAND Core
- *
- * Copy data from OneNAND Core to DataRAM (read)
- * Copy data from DataRAM to OneNAND Core (write)
- * Erase the OneNAND Core (erase)
- */
-static void onenand_data_handle(struct onenand_chip *this, int cmd,
- int dataram, unsigned int offset)
-{
- struct mtd_info *mtd = &info->mtd;
- struct onenand_flash *flash = this->priv;
- int main_offset, spare_offset, die = 0;
- void __iomem *src;
- void __iomem *dest;
- unsigned int i;
- static int pi_operation;
- int erasesize, rgn;
-
- if (dataram) {
- main_offset = mtd->writesize;
- spare_offset = mtd->oobsize;
- } else {
- main_offset = 0;
- spare_offset = 0;
- }
-
- if (pi_operation) {
- die = readw(this->base + ONENAND_REG_START_ADDRESS2);
- die >>= ONENAND_DDP_SHIFT;
- }
-
- switch (cmd) {
- case FLEXONENAND_CMD_PI_ACCESS:
- pi_operation = 1;
- break;
-
- case ONENAND_CMD_RESET:
- pi_operation = 0;
- break;
-
- case ONENAND_CMD_READ:
- src = ONENAND_CORE(flash) + offset;
- dest = ONENAND_MAIN_AREA(this, main_offset);
- if (pi_operation) {
- writew(boundary[die], this->base + ONENAND_DATARAM);
- break;
- }
- memcpy(dest, src, mtd->writesize);
- /* Fall through */
-
- case ONENAND_CMD_READOOB:
- src = ONENAND_CORE_SPARE(flash, this, offset);
- dest = ONENAND_SPARE_AREA(this, spare_offset);
- memcpy(dest, src, mtd->oobsize);
- break;
-
- case ONENAND_CMD_PROG:
- src = ONENAND_MAIN_AREA(this, main_offset);
- dest = ONENAND_CORE(flash) + offset;
- if (pi_operation) {
- boundary[die] = readw(this->base + ONENAND_DATARAM);
- break;
- }
- /* To handle partial write */
- for (i = 0; i < (1 << mtd->subpage_sft); i++) {
- int off = i * this->subpagesize;
- if (!memcmp(src + off, ffchars, this->subpagesize))
- continue;
- if (memcmp(dest + off, ffchars, this->subpagesize) &&
- onenand_check_overwrite(dest + off, src + off, this->subpagesize))
- printk(KERN_ERR "over-write happened at 0x%08x\n", offset);
- memcpy(dest + off, src + off, this->subpagesize);
- }
- /* Fall through */
-
- case ONENAND_CMD_PROGOOB:
- src = ONENAND_SPARE_AREA(this, spare_offset);
- /* Check all data is 0xff chars */
- if (!memcmp(src, ffchars, mtd->oobsize))
- break;
-
- dest = ONENAND_CORE_SPARE(flash, this, offset);
- if (memcmp(dest, ffchars, mtd->oobsize) &&
- onenand_check_overwrite(dest, src, mtd->oobsize))
- printk(KERN_ERR "OOB: over-write happened at 0x%08x\n",
- offset);
- memcpy(dest, src, mtd->oobsize);
- break;
-
- case ONENAND_CMD_ERASE:
- if (pi_operation)
- break;
-
- if (FLEXONENAND(this)) {
- rgn = flexonenand_region(mtd, offset);
- erasesize = mtd->eraseregions[rgn].erasesize;
- } else
- erasesize = mtd->erasesize;
-
- memset(ONENAND_CORE(flash) + offset, 0xff, erasesize);
- memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff,
- (erasesize >> 5));
- break;
-
- default:
- break;
- }
-}
-
-/**
- * onenand_command_handle - Handle command
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- *
- * Emulate OneNAND command.
- */
-static void onenand_command_handle(struct onenand_chip *this, int cmd)
-{
- unsigned long offset = 0;
- int block = -1, page = -1, bufferram = -1;
- int dataram = 0;
-
- switch (cmd) {
- case ONENAND_CMD_UNLOCK:
- case ONENAND_CMD_LOCK:
- case ONENAND_CMD_LOCK_TIGHT:
- case ONENAND_CMD_UNLOCK_ALL:
- onenand_lock_handle(this, cmd);
- break;
-
- case ONENAND_CMD_BUFFERRAM:
- /* Do nothing */
- return;
-
- default:
- block = (int) readw(this->base + ONENAND_REG_START_ADDRESS1);
- if (block & (1 << ONENAND_DDP_SHIFT)) {
- block &= ~(1 << ONENAND_DDP_SHIFT);
- /* The half of chip block */
- block += this->chipsize >> (this->erase_shift + 1);
- }
- if (cmd == ONENAND_CMD_ERASE)
- break;
-
- page = (int) readw(this->base + ONENAND_REG_START_ADDRESS8);
- page = (page >> ONENAND_FPA_SHIFT);
- bufferram = (int) readw(this->base + ONENAND_REG_START_BUFFER);
- bufferram >>= ONENAND_BSA_SHIFT;
- bufferram &= ONENAND_BSA_DATARAM1;
- dataram = (bufferram == ONENAND_BSA_DATARAM1) ? 1 : 0;
- break;
- }
-
- if (block != -1)
- offset = onenand_addr(this, block);
-
- if (page != -1)
- offset += page << this->page_shift;
-
- onenand_data_handle(this, cmd, dataram, offset);
-
- onenand_update_interrupt(this, cmd);
-}
-
-/**
- * onenand_writew - [OneNAND Interface] Emulate write operation
- * @value: value to write
- * @addr: address to write
- *
- * Write OneNAND register with value
- */
-static void onenand_writew(unsigned short value, void __iomem * addr)
-{
- struct onenand_chip *this = info->mtd.priv;
-
- /* BootRAM handling */
- if (addr < this->base + ONENAND_DATARAM) {
- onenand_bootram_handle(this, value);
- return;
- }
- /* Command handling */
- if (addr == this->base + ONENAND_REG_COMMAND)
- onenand_command_handle(this, value);
-
- writew(value, addr);
-}
-
-/**
- * flash_init - Initialize OneNAND simulator
- * @flash: OneNAND simulator data strucutres
- *
- * Initialize OneNAND simulator.
- */
-static int __init flash_init(struct onenand_flash *flash)
-{
- int density, size;
- int buffer_size;
-
- flash->base = kzalloc(131072, GFP_KERNEL);
- if (!flash->base) {
- printk(KERN_ERR "Unable to allocate base address.\n");
- return -ENOMEM;
- }
-
- density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
- density &= ONENAND_DEVICE_DENSITY_MASK;
- size = ((16 << 20) << density);
-
- ONENAND_CORE(flash) = vmalloc(size + (size >> 5));
- if (!ONENAND_CORE(flash)) {
- printk(KERN_ERR "Unable to allocate nand core address.\n");
- kfree(flash->base);
- return -ENOMEM;
- }
-
- memset(ONENAND_CORE(flash), 0xff, size + (size >> 5));
-
- /* Setup registers */
- writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID);
- writew(device_id, flash->base + ONENAND_REG_DEVICE_ID);
- writew(version_id, flash->base + ONENAND_REG_VERSION_ID);
- writew(technology_id, flash->base + ONENAND_REG_TECHNOLOGY);
-
- if (density < 2 && (!CONFIG_FLEXONENAND))
- buffer_size = 0x0400; /* 1KiB page */
- else
- buffer_size = 0x0800; /* 2KiB page */
- writew(buffer_size, flash->base + ONENAND_REG_DATA_BUFFER_SIZE);
-
- return 0;
-}
-
-/**
- * flash_exit - Clean up OneNAND simulator
- * @flash: OneNAND simulator data structures
- *
- * Clean up OneNAND simulator.
- */
-static void flash_exit(struct onenand_flash *flash)
-{
- vfree(ONENAND_CORE(flash));
- kfree(flash->base);
-}
-
-static int __init onenand_sim_init(void)
-{
- /* Allocate all 0xff chars pointer */
- ffchars = kmalloc(MAX_ONENAND_PAGESIZE, GFP_KERNEL);
- if (!ffchars) {
- printk(KERN_ERR "Unable to allocate ff chars.\n");
- return -ENOMEM;
- }
- memset(ffchars, 0xff, MAX_ONENAND_PAGESIZE);
-
- /* Allocate OneNAND simulator mtd pointer */
- info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL);
- if (!info) {
- printk(KERN_ERR "Unable to allocate core structures.\n");
- kfree(ffchars);
- return -ENOMEM;
- }
-
- /* Override write_word function */
- info->onenand.write_word = onenand_writew;
-
- if (flash_init(&info->flash)) {
- printk(KERN_ERR "Unable to allocate flash.\n");
- kfree(ffchars);
- kfree(info);
- return -ENOMEM;
- }
-
- info->parts = os_partitions;
-
- info->onenand.base = info->flash.base;
- info->onenand.priv = &info->flash;
-
- info->mtd.name = "OneNAND simulator";
- info->mtd.priv = &info->onenand;
- info->mtd.owner = THIS_MODULE;
-
- if (onenand_scan(&info->mtd, 1)) {
- flash_exit(&info->flash);
- kfree(ffchars);
- kfree(info);
- return -ENXIO;
- }
-
- mtd_device_register(&info->mtd, info->parts,
- ARRAY_SIZE(os_partitions));
-
- return 0;
-}
-
-static void __exit onenand_sim_exit(void)
-{
- struct onenand_chip *this = info->mtd.priv;
- struct onenand_flash *flash = this->priv;
-
- onenand_release(&info->mtd);
- flash_exit(flash);
- kfree(ffchars);
- kfree(info);
-}
-
-module_init(onenand_sim_init);
-module_exit(onenand_sim_exit);
-
-MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
-MODULE_DESCRIPTION("The OneNAND flash simulator");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index ee70577..dada66b 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1700,7 +1700,8 @@ static int bfin_mac_probe(struct platform_device *pdev)
}
bfin_mac_hwtstamp_init(ndev);
- if (bfin_phc_init(ndev, &pdev->dev)) {
+ rc = bfin_phc_init(ndev, &pdev->dev);
+ if (rc) {
dev_err(&pdev->dev, "Cannot register PHC device!\n");
goto out_err_phc;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index e1e5bb9..fd7b547 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -2640,9 +2640,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
req = get_mac_list_cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
- wrb, &get_mac_list_cmd);
-
+ OPCODE_COMMON_GET_MAC_LIST,
+ get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
req->hdr.domain = domain;
req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
req->perm_override = 1;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6c52a60..a444110 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1827,7 +1827,7 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
mdelay(1);
} else {
be_rx_compl_discard(rxo, rxcp);
- be_cq_notify(adapter, rx_cq->id, true, 1);
+ be_cq_notify(adapter, rx_cq->id, false, 1);
if (rxcp->num_rcvd == 0)
break;
}
@@ -2533,11 +2533,6 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
q = &rxo->q;
if (q->created) {
be_cmd_rxq_destroy(adapter, q);
- /* After the rxq is invalidated, wait for a grace time
- * of 1ms for all dma to end and the flush compl to
- * arrive
- */
- mdelay(1);
be_rx_cq_clean(rxo);
}
be_queue_free(adapter, q);
@@ -2564,6 +2559,7 @@ static int be_close(struct net_device *netdev)
* all tx skbs are freed.
*/
be_tx_compl_clean(adapter);
+ netif_tx_disable(netdev);
be_rx_qs_destroy(adapter);
@@ -2672,6 +2668,7 @@ static int be_open(struct net_device *netdev)
if (!status)
be_link_status_update(adapter, link_status);
+ netif_tx_start_all_queues(netdev);
be_roce_dev_open(adapter);
return 0;
err:
@@ -2783,6 +2780,8 @@ static void be_vf_clear(struct be_adapter *adapter)
goto done;
}
+ pci_disable_sriov(adapter->pdev);
+
for_all_vfs(adapter, vf_cfg, vf) {
if (lancer_chip(adapter))
be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
@@ -2792,7 +2791,6 @@ static void be_vf_clear(struct be_adapter *adapter)
be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
}
- pci_disable_sriov(adapter->pdev);
done:
kfree(adapter->vf_cfg);
adapter->num_vfs = 0;
@@ -2889,13 +2887,8 @@ static int be_vf_setup(struct be_adapter *adapter)
dev_info(dev, "Device supports %d VFs and not %d\n",
adapter->dev_num_vfs, num_vfs);
adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
-
- status = pci_enable_sriov(adapter->pdev, num_vfs);
- if (status) {
- dev_err(dev, "SRIOV enable failed\n");
- adapter->num_vfs = 0;
+ if (!adapter->num_vfs)
return 0;
- }
}
status = be_vf_setup_init(adapter);
@@ -2944,6 +2937,15 @@ static int be_vf_setup(struct be_adapter *adapter)
be_cmd_enable_vf(adapter, vf + 1);
}
+
+ if (!old_vfs) {
+ status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (status) {
+ dev_err(dev, "SRIOV enable failed\n");
+ adapter->num_vfs = 0;
+ goto err;
+ }
+ }
return 0;
err:
dev_err(dev, "VF setup failed\n");
@@ -3198,7 +3200,7 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- if (be_physfn(adapter) && num_vfs) {
+ if (be_physfn(adapter)) {
if (adapter->dev_num_vfs)
be_vf_setup(adapter);
else
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ceb4d43..9ce5b71 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -198,6 +198,11 @@ struct bufdesc_ex {
#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
#define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+struct fec_enet_delayed_work {
+ struct delayed_work delay_work;
+ bool timeout;
+};
+
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
* tx_bd_base always point to the base of the buffer descriptors. The
* cur_rx and cur_tx point to the currently available buffer.
@@ -232,9 +237,6 @@ struct fec_enet_private {
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
- /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
- spinlock_t hw_lock;
-
struct platform_device *pdev;
int opened;
@@ -269,7 +271,7 @@ struct fec_enet_private {
int hwts_rx_en;
int hwts_tx_en;
struct timer_list time_keep;
-
+ struct fec_enet_delayed_work delay_work;
};
void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e25bf83..aff0310 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -445,6 +445,13 @@ fec_restart(struct net_device *ndev, int duplex)
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ napi_disable(&fep->napi);
+ netif_stop_queue(ndev);
+ netif_tx_lock(ndev);
+ }
+
/* Whack a reset. We should wait for this. */
writel(1, fep->hwp + FEC_ECNTRL);
udelay(10);
@@ -605,6 +612,13 @@ fec_restart(struct net_device *ndev, int duplex)
/* Enable interrupts we wish to service */
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ napi_enable(&fep->napi);
+ netif_wake_queue(ndev);
+ netif_tx_unlock(ndev);
+ }
}
static void
@@ -644,8 +658,22 @@ fec_timeout(struct net_device *ndev)
ndev->stats.tx_errors++;
- fec_restart(ndev, fep->full_duplex);
- netif_wake_queue(ndev);
+ fep->delay_work.timeout = true;
+ schedule_delayed_work(&(fep->delay_work.delay_work), 0);
+}
+
+static void fec_enet_work(struct work_struct *work)
+{
+ struct fec_enet_private *fep =
+ container_of(work,
+ struct fec_enet_private,
+ delay_work.delay_work.work);
+
+ if (fep->delay_work.timeout) {
+ fep->delay_work.timeout = false;
+ fec_restart(fep->netdev, fep->full_duplex);
+ netif_wake_queue(fep->netdev);
+ }
}
static void
@@ -1024,16 +1052,12 @@ static void fec_enet_adjust_link(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phy_dev = fep->phy_dev;
- unsigned long flags;
-
int status_change = 0;
- spin_lock_irqsave(&fep->hw_lock, flags);
-
/* Prevent a state halted on mii error */
if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
phy_dev->state = PHY_RESUMING;
- goto spin_unlock;
+ return;
}
if (phy_dev->link) {
@@ -1061,9 +1085,6 @@ static void fec_enet_adjust_link(struct net_device *ndev)
}
}
-spin_unlock:
- spin_unlock_irqrestore(&fep->hw_lock, flags);
-
if (status_change)
phy_print_status(phy_dev);
}
@@ -1732,7 +1753,6 @@ static int fec_enet_init(struct net_device *ndev)
return -ENOMEM;
memset(cbd_base, 0, PAGE_SIZE);
- spin_lock_init(&fep->hw_lock);
fep->netdev = ndev;
@@ -1952,6 +1972,7 @@ fec_probe(struct platform_device *pdev)
if (fep->bufdesc_ex && fep->ptp_clock)
netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
+ INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work);
return 0;
failed_register:
@@ -1984,6 +2005,7 @@ fec_drv_remove(struct platform_device *pdev)
struct fec_enet_private *fep = netdev_priv(ndev);
int i;
+ cancel_delayed_work_sync(&(fep->delay_work.delay_work));
unregister_netdev(ndev);
fec_enet_mii_remove(fep);
del_timer_sync(&fep->time_keep);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 07f6baa..9a95abf 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -912,8 +912,10 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
&efx->pci_dev->dev);
- if (!ptp->phc_clock)
+ if (IS_ERR(ptp->phc_clock)) {
+ rc = PTR_ERR(ptp->phc_clock);
goto fail3;
+ }
INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 66e025a..f3c2d03 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -930,7 +930,7 @@ static int tile_net_setup_interrupts(struct net_device *dev)
if (info->has_iqueue) {
gxio_mpipe_request_notif_ring_interrupt(
&context, cpu_x(cpu), cpu_y(cpu),
- 1, ingress_irq, info->iqueue.ring);
+ KERNEL_PL, ingress_irq, info->iqueue.ring);
}
}
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index c655fe6..5734480 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -1990,7 +1990,8 @@ spider_net_open(struct net_device *netdev)
goto alloc_rx_failed;
/* Allocate rx skbs */
- if (spider_net_alloc_rx_skbs(card))
+ result = spider_net_alloc_rx_skbs(card);
+ if (result)
goto alloc_skbs_failed;
spider_net_set_multi(netdev);
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index a06fca6..22b4527 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -609,7 +609,7 @@ static int bfin_sir_open(struct net_device *dev)
{
struct bfin_sir_self *self = netdev_priv(dev);
struct bfin_sir_port *port = self->sir_port;
- int err = -ENOMEM;
+ int err;
self->newspeed = 0;
self->speed = 9600;
@@ -623,8 +623,10 @@ static int bfin_sir_open(struct net_device *dev)
bfin_sir_set_speed(port, 9600);
self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
- if (!self->irlap)
+ if (!self->irlap) {
+ err = -ENOMEM;
goto err_irlap;
+ }
INIT_WORK(&self->work, bfin_sir_send_work);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 4503452..1e11f2b 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -126,7 +126,7 @@ config MDIO_BITBANG
config MDIO_GPIO
tristate "Support for GPIO lib-based bitbanged MDIO buses"
- depends on MDIO_BITBANG && GENERIC_GPIO
+ depends on MDIO_BITBANG && GPIOLIB
---help---
Supports GPIO lib-based MDIO busses.
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 24fbec2..078795f 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -613,6 +613,13 @@ static const struct usb_device_id products [] = {
.driver_info = 0,
},
+/* Dell Wireless 5804 (Novatel E371) - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x819b, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* AnyDATA ADU960S - handled by qmi_wwan */
{
USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 834e405..cf887c2 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -501,6 +501,13 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* Dell Wireless 5804 (Novatel E371) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x819b,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
{ /* ADU960S */
USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a,
USB_CLASS_COMM,
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index a923d61..a79e9d3 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -426,6 +426,13 @@ static void sierra_net_dosync(struct usbnet *dev)
dev_dbg(&dev->udev->dev, "%s", __func__);
+ /* The SIERRA_NET_HIP_MSYNC_ID command appears to request that the
+ * firmware restart itself. After restarting, the modem will respond
+ * with the SIERRA_NET_HIP_RESTART_ID indication. The driver continues
+ * sending MSYNC commands every few seconds until it receives the
+ * RESTART event from the firmware.
+ */
+
/* tell modem we are ready */
status = sierra_net_send_sync(dev);
if (status < 0)
@@ -704,6 +711,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
/* set context index initially to 0 - prepares tx hdr template */
sierra_net_set_ctx_index(priv, 0);
+ /* prepare sync message template */
+ memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg));
+
/* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
dev->rx_urb_size = SIERRA_NET_RX_URB_SIZE;
if (dev->udev->speed != USB_SPEED_HIGH)
@@ -739,11 +749,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
kfree(priv);
return -ENODEV;
}
- /* prepare sync message from template */
- memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg));
-
- /* initiate the sync sequence */
- sierra_net_dosync(dev);
return 0;
}
@@ -766,8 +771,9 @@ static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf)
netdev_err(dev->net,
"usb_control_msg failed, status %d\n", status);
- sierra_net_set_private(dev, NULL);
+ usbnet_status_stop(dev);
+ sierra_net_set_private(dev, NULL);
kfree(priv);
}
@@ -908,6 +914,24 @@ static const struct driver_info sierra_net_info_direct_ip = {
.tx_fixup = sierra_net_tx_fixup,
};
+static int
+sierra_net_probe(struct usb_interface *udev, const struct usb_device_id *prod)
+{
+ int ret;
+
+ ret = usbnet_probe(udev, prod);
+ if (ret == 0) {
+ struct usbnet *dev = usb_get_intfdata(udev);
+
+ ret = usbnet_status_start(dev, GFP_KERNEL);
+ if (ret == 0) {
+ /* Interrupt URB now set up; initiate sync sequence */
+ sierra_net_dosync(dev);
+ }
+ }
+ return ret;
+}
+
#define DIRECT_IP_DEVICE(vend, prod) \
{USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \
.driver_info = (unsigned long)&sierra_net_info_direct_ip}, \
@@ -930,7 +954,7 @@ MODULE_DEVICE_TABLE(usb, products);
static struct usb_driver sierra_net_driver = {
.name = "sierra_net",
.id_table = products,
- .probe = usbnet_probe,
+ .probe = sierra_net_probe,
.disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 1e5a9b7..f95cb03 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -252,6 +252,70 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf)
return 0;
}
+/* Submit the interrupt URB if not previously submitted, increasing refcount */
+int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags)
+{
+ int ret = 0;
+
+ WARN_ON_ONCE(dev->interrupt == NULL);
+ if (dev->interrupt) {
+ mutex_lock(&dev->interrupt_mutex);
+
+ if (++dev->interrupt_count == 1)
+ ret = usb_submit_urb(dev->interrupt, mem_flags);
+
+ dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n",
+ dev->interrupt_count);
+ mutex_unlock(&dev->interrupt_mutex);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usbnet_status_start);
+
+/* For resume; submit interrupt URB if previously submitted */
+static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags)
+{
+ int ret = 0;
+
+ mutex_lock(&dev->interrupt_mutex);
+ if (dev->interrupt_count) {
+ ret = usb_submit_urb(dev->interrupt, mem_flags);
+ dev_dbg(&dev->udev->dev,
+ "submitted interrupt URB for resume\n");
+ }
+ mutex_unlock(&dev->interrupt_mutex);
+ return ret;
+}
+
+/* Kill the interrupt URB if all submitters want it killed */
+void usbnet_status_stop(struct usbnet *dev)
+{
+ if (dev->interrupt) {
+ mutex_lock(&dev->interrupt_mutex);
+ WARN_ON(dev->interrupt_count == 0);
+
+ if (dev->interrupt_count && --dev->interrupt_count == 0)
+ usb_kill_urb(dev->interrupt);
+
+ dev_dbg(&dev->udev->dev,
+ "decremented interrupt URB count to %d\n",
+ dev->interrupt_count);
+ mutex_unlock(&dev->interrupt_mutex);
+ }
+}
+EXPORT_SYMBOL_GPL(usbnet_status_stop);
+
+/* For suspend; always kill interrupt URB */
+static void __usbnet_status_stop_force(struct usbnet *dev)
+{
+ if (dev->interrupt) {
+ mutex_lock(&dev->interrupt_mutex);
+ usb_kill_urb(dev->interrupt);
+ dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n");
+ mutex_unlock(&dev->interrupt_mutex);
+ }
+}
+
/* Passes this packet up the stack, updating its accounting.
* Some link protocols batch packets, so their rx_fixup paths
* can return clones as well as just modify the original skb.
@@ -725,7 +789,7 @@ int usbnet_stop (struct net_device *net)
if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
usbnet_terminate_urbs(dev);
- usb_kill_urb(dev->interrupt);
+ usbnet_status_stop(dev);
usbnet_purge_paused_rxq(dev);
@@ -787,7 +851,7 @@ int usbnet_open (struct net_device *net)
/* start any status interrupt transfer */
if (dev->interrupt) {
- retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
+ retval = usbnet_status_start(dev, GFP_KERNEL);
if (retval < 0) {
netif_err(dev, ifup, dev->net,
"intr submit %d\n", retval);
@@ -1458,6 +1522,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->delay.data = (unsigned long) dev;
init_timer (&dev->delay);
mutex_init (&dev->phy_mutex);
+ mutex_init(&dev->interrupt_mutex);
+ dev->interrupt_count = 0;
dev->net = net;
strcpy (net->name, "usb%d");
@@ -1593,7 +1659,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
*/
netif_device_detach (dev->net);
usbnet_terminate_urbs(dev);
- usb_kill_urb(dev->interrupt);
+ __usbnet_status_stop_force(dev);
/*
* reattach so runtime management can use and
@@ -1613,9 +1679,8 @@ int usbnet_resume (struct usb_interface *intf)
int retval;
if (!--dev->suspend_count) {
- /* resume interrupt URBs */
- if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
- usb_submit_urb(dev->interrupt, GFP_NOIO);
+ /* resume interrupt URB if it was previously submitted */
+ __usbnet_status_start_force(dev, GFP_NOIO);
spin_lock_irq(&dev->txq.lock);
while ((res = usb_get_from_anchor(&dev->deferred))) {
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 23049ae..d5a57a9 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -84,13 +84,10 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
phy = get_phy_device(mdio, addr, is_c45);
if (!phy || IS_ERR(phy)) {
- phy = phy_device_create(mdio, addr, 0, false, NULL);
- if (!phy || IS_ERR(phy)) {
- dev_err(&mdio->dev,
- "error creating PHY at address %i\n",
- addr);
- continue;
- }
+ dev_err(&mdio->dev,
+ "cannot get PHY at address %i\n",
+ addr);
+ continue;
}
/* Associate the OF node with the device structure so it
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 748f8f3..32e66a6 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -174,6 +174,7 @@ int pci_bus_add_device(struct pci_dev *dev)
* Can not put in pci_device_add yet because resources
* are not assigned yet for some devices.
*/
+ pci_fixup_device(pci_fixup_final, dev);
pci_create_sysfs_dev_files(dev);
dev->match_driver = true;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d40bed7..2c10752 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -563,8 +563,10 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
entry->msi_attrib.pos = dev->msi_cap;
- entry->mask_pos = dev->msi_cap + (control & PCI_MSI_FLAGS_64BIT) ?
- PCI_MSI_MASK_64 : PCI_MSI_MASK_32;
+ if (control & PCI_MSI_FLAGS_64BIT)
+ entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+ else
+ entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
/* All MSIs are unmasked by default, Mask them all */
if (entry->msi_attrib.maskbit)
pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
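Note: the msi.c hunk fixes a C operator-precedence bug. '+' binds more tightly than '?:', so the old expression parsed as '(dev->msi_cap + (control & PCI_MSI_FLAGS_64BIT)) ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32'; since msi_cap is non-zero, mask_pos always ended up as the bare PCI_MSI_MASK_64 offset with the capability base never added. A tiny standalone sketch of the pitfall (the numbers are made up):

#include <stdio.h>

int main(void)
{
	int base = 0x50, flag = 0;	/* hypothetical capability offset and flag */

	/* Parses as (base + flag) ? 12 : 8, so 'flag' never influences the result */
	int buggy = base + flag ? 12 : 8;

	/* Intended meaning: choose the sub-offset first, then add the base */
	int fixed = base + (flag ? 12 : 8);

	printf("buggy=%d fixed=%d\n", buggy, fixed);	/* buggy=12 fixed=88 */
	return 0;
}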
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 631aeb7..70f10fa 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1341,7 +1341,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
list_add_tail(&dev->bus_list, &bus->devices);
up_write(&pci_bus_sem);
- pci_fixup_device(pci_fixup_final, dev);
ret = pcibios_add_device(dev);
WARN_ON(ret < 0);
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index a3a851e..18c0d8d 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -68,12 +68,6 @@ MODULE_LICENSE("Dual MPL/GPL");
#if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B)
-/* The RPX series use SLOT_B */
-#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
-#define CONFIG_PCMCIA_SLOT_B
-#define CONFIG_BD_IS_MHZ
-#endif
-
/* The ADS board use SLOT_A */
#ifdef CONFIG_ADS
#define CONFIG_PCMCIA_SLOT_A
@@ -253,81 +247,6 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev);
#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */
-/* ------------------------------------------------------------------------- */
-/* board specific stuff: */
-/* voltage_set(), hardware_enable() and hardware_disable() */
-/* ------------------------------------------------------------------------- */
-/* RPX Boards from Embedded Planet */
-
-#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
-
-/* The RPX boards seems to have it's bus monitor timeout set to 6*8 clocks.
- * SYPCR is write once only, therefore must the slowest memory be faster
- * than the bus monitor or we will get a machine check due to the bus timeout.
- */
-
-#define PCMCIA_BOARD_MSG "RPX CLASSIC or RPX LITE"
-
-#undef PCMCIA_BMT_LIMIT
-#define PCMCIA_BMT_LIMIT (6*8)
-
-static int voltage_set(int slot, int vcc, int vpp)
-{
- u32 reg = 0;
-
- switch (vcc) {
- case 0:
- break;
- case 33:
- reg |= BCSR1_PCVCTL4;
- break;
- case 50:
- reg |= BCSR1_PCVCTL5;
- break;
- default:
- return 1;
- }
-
- switch (vpp) {
- case 0:
- break;
- case 33:
- case 50:
- if (vcc == vpp)
- reg |= BCSR1_PCVCTL6;
- else
- return 1;
- break;
- case 120:
- reg |= BCSR1_PCVCTL7;
- default:
- return 1;
- }
-
- if (!((vcc == 50) || (vcc == 0)))
- return 1;
-
- /* first, turn off all power */
-
- out_be32(((u32 *) RPX_CSR_ADDR),
- in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 |
- BCSR1_PCVCTL5 |
- BCSR1_PCVCTL6 |
- BCSR1_PCVCTL7));
-
- /* enable new powersettings */
-
- out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg);
-
- return 0;
-}
-
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_) /* No hardware to enable */
-#define hardware_disable(_slot_) /* No hardware to disable */
-
-#endif /* CONFIG_RPXCLASSIC */
-
/* FADS Boards from Motorola */
#if defined(CONFIG_FADS)
@@ -419,65 +338,6 @@ static inline int voltage_set(int slot, int vcc, int vpp)
#endif
-/* ------------------------------------------------------------------------- */
-/* Motorola MBX860 */
-
-#if defined(CONFIG_MBX)
-
-#define PCMCIA_BOARD_MSG "MBX"
-
-static int voltage_set(int slot, int vcc, int vpp)
-{
- u8 reg = 0;
-
- switch (vcc) {
- case 0:
- break;
- case 33:
- reg |= CSR2_VCC_33;
- break;
- case 50:
- reg |= CSR2_VCC_50;
- break;
- default:
- return 1;
- }
-
- switch (vpp) {
- case 0:
- break;
- case 33:
- case 50:
- if (vcc == vpp)
- reg |= CSR2_VPP_VCC;
- else
- return 1;
- break;
- case 120:
- if ((vcc == 33) || (vcc == 50))
- reg |= CSR2_VPP_12;
- else
- return 1;
- default:
- return 1;
- }
-
- /* first, turn off all power */
- out_8((u8 *) MBX_CSR2_ADDR,
- in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
-
- /* enable new powersettings */
- out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg);
-
- return 0;
-}
-
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_) /* No hardware to enable */
-#define hardware_disable(_slot_) /* No hardware to disable */
-
-#endif /* CONFIG_MBX */
-
#if defined(CONFIG_PRxK)
#include <asm/cpld.h>
extern volatile fpga_pc_regs *fpga_pc;
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 0e1f99c..f8a2ae4 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -6,7 +6,7 @@ if ARCH_SHMOBILE || SUPERH
config PINCTRL_SH_PFC
# XXX move off the gpio dependency
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB
select PINMUX
select PINCONF
@@ -40,19 +40,19 @@ config PINCTRL_PFC_R8A7779
config PINCTRL_PFC_SH7203
def_bool y
depends on CPU_SUBTYPE_SH7203
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7264
def_bool y
depends on CPU_SUBTYPE_SH7264
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7269
def_bool y
depends on CPU_SUBTYPE_SH7269
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7372
@@ -68,55 +68,55 @@ config PINCTRL_PFC_SH73A0
config PINCTRL_PFC_SH7720
def_bool y
depends on CPU_SUBTYPE_SH7720
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7722
def_bool y
depends on CPU_SUBTYPE_SH7722
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7723
def_bool y
depends on CPU_SUBTYPE_SH7723
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7724
def_bool y
depends on CPU_SUBTYPE_SH7724
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7734
def_bool y
depends on CPU_SUBTYPE_SH7734
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7757
def_bool y
depends on CPU_SUBTYPE_SH7757
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7785
def_bool y
depends on CPU_SUBTYPE_SH7785
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7786
def_bool y
depends on CPU_SUBTYPE_SH7786
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SHX3
def_bool y
depends on CPU_SUBTYPE_SHX3
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
endif
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index a5d97ea..8bb2644 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -66,7 +66,7 @@ config REGULATOR_USERSPACE_CONSUMER
config REGULATOR_GPIO
tristate "GPIO regulator support"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
This driver provides support for regulators that can be
controlled via gpios.
diff --git a/drivers/rtc/rtc-tile.c b/drivers/rtc/rtc-tile.c
index 249b653..fc3dee9 100644
--- a/drivers/rtc/rtc-tile.c
+++ b/drivers/rtc/rtc-tile.c
@@ -146,6 +146,7 @@ exit_driver_unregister:
*/
static void __exit tile_rtc_driver_exit(void)
{
+ platform_device_unregister(tile_rtc_platform_device);
platform_driver_unregister(&tile_rtc_platform_driver);
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 141d8c1..92a9345 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -62,7 +62,7 @@ config SPI_ALTERA
config SPI_ATH79
tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
- depends on ATH79 && GENERIC_GPIO
+ depends on ATH79 && GPIOLIB
select SPI_BITBANG
help
This enables support for the SPI controller present on the
@@ -175,7 +175,7 @@ config SPI_FALCON
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select SPI_BITBANG
help
This simple GPIO bitbanging SPI master uses the arch-neutral GPIO
@@ -259,7 +259,7 @@ config SPI_FSL_ESPI
config SPI_OC_TINY
tristate "OpenCores tiny SPI"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select SPI_BITBANG
help
This is the driver for OpenCores tiny SPI master controller.
@@ -457,7 +457,7 @@ config SPI_TOPCLIFF_PCH
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
- depends on GENERIC_GPIO && CPU_TX49XX
+ depends on GPIOLIB && CPU_TX49XX
help
SPI driver for Toshiba TXx9 MIPS SoCs
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index fa385a3..0907706 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -18,7 +18,7 @@
#include "ssb_private.h"
-static const char *part_probes[] = { "bcm47xxpart", NULL };
+static const char * const part_probes[] = { "bcm47xxpart", NULL };
static struct physmap_flash_data ssb_pflash_data = {
.part_probe_types = part_probes,
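Note: the ssb change makes part_probes an array of const pointers to const strings, so the whole table can be placed in read-only data. A minimal sketch of the difference between the two declarations (the array names are placeholders):

static const char *probes_a[]        = { "bcm47xxpart", NULL };
static const char * const probes_b[] = { "bcm47xxpart", NULL };

static void demo(void)
{
	probes_a[0] = "foo";		/* legal: the pointer slots themselves are writable */
	/* probes_b[0] = "foo"; */	/* would not compile: the slots are const as well */
}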
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 9f61d46..c0c95be 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -54,7 +54,7 @@ config ANDROID_TIMED_OUTPUT
config ANDROID_TIMED_GPIO
tristate "Android timed gpio driver"
- depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT
+ depends on GPIOLIB && ANDROID_TIMED_OUTPUT
default n
config ANDROID_LOW_MEMORY_KILLER
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index e2e786d..ad45dfb 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -61,7 +61,7 @@ config LIS3L02DQ
depends on SPI
select IIO_TRIGGER if IIO_BUFFER
depends on !IIO_BUFFER || IIO_KFIFO_BUF
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say yes here to build SPI support for the ST microelectronics
accelerometer. The driver supplies direct access via sysfs files
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index d990829..cabc7a3 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -73,7 +73,7 @@ config AD7780
config AD7816
tristate "Analog Devices AD7816/7/8 temperature sensor and ADC driver"
depends on SPI
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say yes here to build support for Analog Devices AD7816/7/8
temperature sensors and ADC.
diff --git a/drivers/staging/iio/addac/Kconfig b/drivers/staging/iio/addac/Kconfig
index 698a897..e6795e0 100644
--- a/drivers/staging/iio/addac/Kconfig
+++ b/drivers/staging/iio/addac/Kconfig
@@ -5,7 +5,7 @@ menu "Analog digital bi-direction converters"
config ADT7316
tristate "Analog Devices ADT7316/7/8 ADT7516/7/9 temperature sensor, ADC and DAC driver"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say yes here to build support for Analog Devices ADT7316, ADT7317, ADT7318
and ADT7516, ADT7517, ADT7519 temperature sensors, ADC and DAC.
diff --git a/drivers/staging/iio/resolver/Kconfig b/drivers/staging/iio/resolver/Kconfig
index 49f69ef..ce360f1 100644
--- a/drivers/staging/iio/resolver/Kconfig
+++ b/drivers/staging/iio/resolver/Kconfig
@@ -13,7 +13,7 @@ config AD2S90
config AD2S1200
tristate "Analog Devices ad2s1200/ad2s1205 driver"
depends on SPI
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say yes here to build support for Analog Devices spi resolver
to digital converters, ad2s1200 and ad2s1205, provides direct access
@@ -22,7 +22,7 @@ config AD2S1200
config AD2S1210
tristate "Analog Devices ad2s1210 driver"
depends on SPI
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say yes here to build support for Analog Devices spi resolver
to digital converters, ad2s1210, provides direct access via sysfs.
diff --git a/drivers/staging/iio/trigger/Kconfig b/drivers/staging/iio/trigger/Kconfig
index d44d3ad..1a051da 100644
--- a/drivers/staging/iio/trigger/Kconfig
+++ b/drivers/staging/iio/trigger/Kconfig
@@ -14,7 +14,7 @@ config IIO_PERIODIC_RTC_TRIGGER
config IIO_GPIO_TRIGGER
tristate "GPIO trigger"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Provides support for using GPIO pins as IIO triggers.
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index a764f16..5e3c025 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -67,15 +67,16 @@ config THERMAL_GOV_USER_SPACE
Enable this to let the user space manage the platform thermals.
config CPU_THERMAL
- tristate "generic cpu cooling support"
+ bool "generic cpu cooling support"
depends on CPU_FREQ
select CPU_FREQ_TABLE
help
This implements the generic cpu cooling mechanism through frequency
- reduction, cpu hotplug and any other ways of reducing temperature. An
- ACPI version of this already exists(drivers/acpi/processor_thermal.c).
+ reduction. An ACPI version of this already exists
+ (drivers/acpi/processor_thermal.c).
This will be useful for platforms using the generic thermal interface
and not the ACPI interface.
+
If you want this support, you should say Y here.
config THERMAL_EMULATION
@@ -86,6 +87,10 @@ config THERMAL_EMULATION
user can manually input temperature and test the different trip
threshold behaviour for simulation purpose.
+ WARNING: Be careful while enabling this option on production systems,
+ because userland can easily disable the thermal policy by simply
+ flooding this sysfs node with low temperature values.
+
config SPEAR_THERMAL
bool "SPEAr thermal sensor driver"
depends on PLAT_SPEAR
@@ -117,15 +122,6 @@ config EXYNOS_THERMAL
If you say yes here you get support for TMU (Thermal Management
Unit) on SAMSUNG EXYNOS series of SoC.
-config EXYNOS_THERMAL_EMUL
- bool "EXYNOS TMU emulation mode support"
- depends on EXYNOS_THERMAL
- help
- Exynos 4412 and 4414 and 5 series has emulation mode on TMU.
- Enable this option will be make sysfs node in exynos thermal platform
- device directory to support emulation mode. With emulation mode sysfs
- node, you can manually input temperature to TMU for simulation purpose.
-
config DOVE_THERMAL
tristate "Temperature sensor on Marvell Dove SoCs"
depends on ARCH_DOVE
@@ -144,6 +140,14 @@ config DB8500_THERMAL
created. Cooling devices can be bound to the trip points to cool this
thermal zone if trip points reached.
+config ARMADA_THERMAL
+ tristate "Armada 370/XP thermal management"
+ depends on ARCH_MVEBU
+ depends on OF
+ help
+ Enable this option if you want to have support for the thermal
+ management controller present in the Armada 370 and Armada XP SoCs.
+
config DB8500_CPUFREQ_COOLING
tristate "DB8500 cpufreq cooling"
depends on ARCH_U8500
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index d3a2b38..c054d41 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -3,14 +3,15 @@
#
obj-$(CONFIG_THERMAL) += thermal_sys.o
+thermal_sys-y += thermal_core.o
# governors
-obj-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
-obj-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
-obj-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
+thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
+thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
+thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
# cpufreq cooling
-obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
+thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
# platform thermal drivers
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
@@ -19,6 +20,7 @@ obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o
obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o
obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
+obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o
obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
new file mode 100644
index 0000000..5b4d75f
--- /dev/null
+++ b/drivers/thermal/armada_thermal.c
@@ -0,0 +1,232 @@
+/*
+ * Marvell Armada 370/XP thermal sensor driver
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/thermal.h>
+
+#define THERMAL_VALID_OFFSET 9
+#define THERMAL_VALID_MASK 0x1
+#define THERMAL_TEMP_OFFSET 10
+#define THERMAL_TEMP_MASK 0x1ff
+
+/* Thermal Manager Control and Status Register */
+#define PMU_TDC0_SW_RST_MASK (0x1 << 1)
+#define PMU_TM_DISABLE_OFFS 0
+#define PMU_TM_DISABLE_MASK (0x1 << PMU_TM_DISABLE_OFFS)
+#define PMU_TDC0_REF_CAL_CNT_OFFS 11
+#define PMU_TDC0_REF_CAL_CNT_MASK (0x1ff << PMU_TDC0_REF_CAL_CNT_OFFS)
+#define PMU_TDC0_OTF_CAL_MASK (0x1 << 30)
+#define PMU_TDC0_START_CAL_MASK (0x1 << 25)
+
+struct armada_thermal_ops;
+
+/* Marvell EBU Thermal Sensor Dev Structure */
+struct armada_thermal_priv {
+ void __iomem *sensor;
+ void __iomem *control;
+ struct armada_thermal_ops *ops;
+};
+
+struct armada_thermal_ops {
+ /* Initialize the sensor */
+ void (*init_sensor)(struct armada_thermal_priv *);
+
+ /* Test for a valid sensor value (optional) */
+ bool (*is_valid)(struct armada_thermal_priv *);
+};
+
+static void armadaxp_init_sensor(struct armada_thermal_priv *priv)
+{
+ unsigned long reg;
+
+ reg = readl_relaxed(priv->control);
+ reg |= PMU_TDC0_OTF_CAL_MASK;
+ writel(reg, priv->control);
+
+ /* Reference calibration value */
+ reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
+ reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS);
+ writel(reg, priv->control);
+
+ /* Reset the sensor */
+ reg = readl_relaxed(priv->control);
+ writel((reg | PMU_TDC0_SW_RST_MASK), priv->control);
+
+ writel(reg, priv->control);
+
+ /* Enable the sensor */
+ reg = readl_relaxed(priv->sensor);
+ reg &= ~PMU_TM_DISABLE_MASK;
+ writel(reg, priv->sensor);
+}
+
+static void armada370_init_sensor(struct armada_thermal_priv *priv)
+{
+ unsigned long reg;
+
+ reg = readl_relaxed(priv->control);
+ reg |= PMU_TDC0_OTF_CAL_MASK;
+ writel(reg, priv->control);
+
+ /* Reference calibration value */
+ reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
+ reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS);
+ writel(reg, priv->control);
+
+ reg &= ~PMU_TDC0_START_CAL_MASK;
+ writel(reg, priv->control);
+
+ mdelay(10);
+}
+
+static bool armada_is_valid(struct armada_thermal_priv *priv)
+{
+ unsigned long reg = readl_relaxed(priv->sensor);
+
+ return (reg >> THERMAL_VALID_OFFSET) & THERMAL_VALID_MASK;
+}
+
+static int armada_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ struct armada_thermal_priv *priv = thermal->devdata;
+ unsigned long reg;
+
+ /* Valid check */
+ if (priv->ops->is_valid && !priv->ops->is_valid(priv)) {
+ dev_err(&thermal->device,
+ "Temperature sensor reading not valid\n");
+ return -EIO;
+ }
+
+ reg = readl_relaxed(priv->sensor);
+ reg = (reg >> THERMAL_TEMP_OFFSET) & THERMAL_TEMP_MASK;
+ *temp = (3153000000UL - (10000000UL*reg)) / 13825;
+ return 0;
+}
+
+static struct thermal_zone_device_ops ops = {
+ .get_temp = armada_get_temp,
+};
+
+static const struct armada_thermal_ops armadaxp_ops = {
+ .init_sensor = armadaxp_init_sensor,
+};
+
+static const struct armada_thermal_ops armada370_ops = {
+ .is_valid = armada_is_valid,
+ .init_sensor = armada370_init_sensor,
+};
+
+static const struct of_device_id armada_thermal_id_table[] = {
+ {
+ .compatible = "marvell,armadaxp-thermal",
+ .data = &armadaxp_ops,
+ },
+ {
+ .compatible = "marvell,armada370-thermal",
+ .data = &armada370_ops,
+ },
+ {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, armada_thermal_id_table);
+
+static int armada_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *thermal;
+ const struct of_device_id *match;
+ struct armada_thermal_priv *priv;
+ struct resource *res;
+
+ match = of_match_device(armada_thermal_id_table, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+
+ priv->sensor = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->sensor))
+ return PTR_ERR(priv->sensor);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+
+ priv->control = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->control))
+ return PTR_ERR(priv->control);
+
+ priv->ops = (struct armada_thermal_ops *)match->data;
+ priv->ops->init_sensor(priv);
+
+ thermal = thermal_zone_device_register("armada_thermal", 0, 0,
+ priv, &ops, NULL, 0, 0);
+ if (IS_ERR(thermal)) {
+ dev_err(&pdev->dev,
+ "Failed to register thermal zone device\n");
+ return PTR_ERR(thermal);
+ }
+
+ platform_set_drvdata(pdev, thermal);
+
+ return 0;
+}
+
+static int armada_thermal_exit(struct platform_device *pdev)
+{
+ struct thermal_zone_device *armada_thermal =
+ platform_get_drvdata(pdev);
+
+ thermal_zone_device_unregister(armada_thermal);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver armada_thermal_driver = {
+ .probe = armada_thermal_probe,
+ .remove = armada_thermal_exit,
+ .driver = {
+ .name = "armada_thermal",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(armada_thermal_id_table),
+ },
+};
+
+module_platform_driver(armada_thermal_driver);
+
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel.garcia@free-electrons.com>");
+MODULE_DESCRIPTION("Armada 370/XP thermal driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 8dc44cb..c94bf2e 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -20,10 +20,8 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/thermal.h>
-#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -31,21 +29,19 @@
#include <linux/cpu_cooling.h>
/**
- * struct cpufreq_cooling_device
+ * struct cpufreq_cooling_device - data for cooling device with cpufreq
* @id: unique integer value corresponding to each cpufreq_cooling_device
* registered.
- * @cool_dev: thermal_cooling_device pointer to keep track of the the
- * egistered cooling device.
+ * @cool_dev: thermal_cooling_device pointer to keep track of the
+ * registered cooling device.
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
* @cpufreq_val: integer value representing the absolute value of the clipped
* frequency.
* @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
- * @node: list_head to link all cpufreq_cooling_device together.
*
* This structure is required for keeping information of each
- * cpufreq_cooling_device registered as a list whose head is represented by
- * cooling_cpufreq_list. In order to prevent corruption of this list a
+ * cpufreq_cooling_device registered. In order to prevent corruption of this data, a
* mutex lock cooling_cpufreq_lock is used.
*/
struct cpufreq_cooling_device {
@@ -54,9 +50,7 @@ struct cpufreq_cooling_device {
unsigned int cpufreq_state;
unsigned int cpufreq_val;
struct cpumask allowed_cpus;
- struct list_head node;
};
-static LIST_HEAD(cooling_cpufreq_list);
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
@@ -70,6 +64,11 @@ static struct cpufreq_cooling_device *notify_device;
* get_idr - function to get a unique id.
* @idr: struct idr * handle used to create a id.
* @id: int * value generated by this function.
+ *
+ * This function will populate @id with a unique
+ * id, using the idr API.
+ *
+ * Return: 0 on success, an error code on failure.
*/
static int get_idr(struct idr *idr, int *id)
{
@@ -81,6 +80,7 @@ static int get_idr(struct idr *idr, int *id)
if (unlikely(ret < 0))
return ret;
*id = ret;
+
return 0;
}
@@ -99,63 +99,162 @@ static void release_idr(struct idr *idr, int id)
/* Below code defines functions to be used for cpufreq as cooling device */
/**
- * is_cpufreq_valid - function to check if a cpu has frequency transition policy.
+ * is_cpufreq_valid - function to check frequency transitioning capability.
* @cpu: cpu for which check is needed.
+ *
+ * This function checks whether the current state of the system allows
+ * the frequency to be changed for a given @cpu.
+ *
+ * Return: 0 if the system is not currently capable of changing
+ * the frequency of given cpu. !0 in case the frequency is changeable.
*/
static int is_cpufreq_valid(int cpu)
{
struct cpufreq_policy policy;
+
return !cpufreq_get_policy(&policy, cpu);
}
+enum cpufreq_cooling_property {
+ GET_LEVEL,
+ GET_FREQ,
+ GET_MAXL,
+};
+
/**
- * get_cpu_frequency - get the absolute value of frequency from level.
- * @cpu: cpu for which frequency is fetched.
- * @level: level of frequency, equals cooling state of cpu cooling device
- * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
+ * get_property - fetch a property of interest for a given cpu.
+ * @cpu: cpu for which the property is required
+ * @input: query parameter
+ * @output: query return
+ * @property: type of query (frequency, level, max level)
+ *
+ * This is the common function to
+ * 1. get maximum cpu cooling states
+ * 2. translate frequency to cooling state
+ * 3. translate cooling state to frequency
+ * Note that the code may not be in good shape,
+ * but it is written in this way in order to:
+ * a) reduce duplicate code as most of the code can be shared.
+ * b) make sure the logic is consistent when translating between
+ * cooling states and frequencies.
+ *
+ * Return: 0 on success, -EINVAL when invalid parameters are passed.
*/
-static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
+static int get_property(unsigned int cpu, unsigned long input,
+ unsigned int *output,
+ enum cpufreq_cooling_property property)
{
- int ret = 0, i = 0;
- unsigned long level_index;
- bool descend = false;
+ int i, j;
+ unsigned long max_level = 0, level = 0;
+ unsigned int freq = CPUFREQ_ENTRY_INVALID;
+ int descend = -1;
struct cpufreq_frequency_table *table =
cpufreq_frequency_get_table(cpu);
+
+ if (!output)
+ return -EINVAL;
+
if (!table)
- return ret;
+ return -EINVAL;
- while (table[i].frequency != CPUFREQ_TABLE_END) {
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ /* ignore invalid entries */
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
continue;
- /*check if table in ascending or descending order*/
- if ((table[i + 1].frequency != CPUFREQ_TABLE_END) &&
- (table[i + 1].frequency < table[i].frequency)
- && !descend) {
- descend = true;
- }
+ /* ignore duplicate entry */
+ if (freq == table[i].frequency)
+ continue;
+
+ /* get the frequency order */
+ if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
+ descend = !!(freq > table[i].frequency);
- /*return if level matched and table in descending order*/
- if (descend && i == level)
- return table[i].frequency;
- i++;
+ freq = table[i].frequency;
+ max_level++;
}
- i--;
- if (level > i || descend)
- return ret;
- level_index = i - level;
+ /* get max level */
+ if (property == GET_MAXL) {
+ *output = (unsigned int)max_level;
+ return 0;
+ }
- /*Scan the table in reverse order and match the level*/
- while (i >= 0) {
+ if (property == GET_FREQ)
+ level = descend ? input : (max_level - input - 1);
+
+ for (i = 0, j = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ /* ignore invalid entry */
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
continue;
- /*return if level matched*/
- if (i == level_index)
- return table[i].frequency;
- i--;
+
+ /* ignore duplicate entry */
+ if (freq == table[i].frequency)
+ continue;
+
+ /* now we have a valid frequency entry */
+ freq = table[i].frequency;
+
+ if (property == GET_LEVEL && (unsigned int)input == freq) {
+ /* get level by frequency */
+ *output = descend ? j : (max_level - j - 1);
+ return 0;
+ }
+ if (property == GET_FREQ && level == j) {
+ /* get frequency by level */
+ *output = freq;
+ return 0;
+ }
+ j++;
}
- return ret;
+
+ return -EINVAL;
+}
+
+/**
+ * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
+ * @cpu: cpu for which the level is required
+ * @freq: the frequency of interest
+ *
+ * This function will match the cooling level corresponding to the
+ * requested @freq and return it.
+ *
+ * Return: The matched cooling level on success or THERMAL_CSTATE_INVALID
+ * otherwise.
+ */
+unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
+{
+ unsigned int val;
+
+ if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL))
+ return THERMAL_CSTATE_INVALID;
+
+ return (unsigned long)val;
+}
+EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
+
+/**
+ * get_cpu_frequency - get the absolute value of frequency from level.
+ * @cpu: cpu for which frequency is fetched.
+ * @level: cooling level
+ *
+ * This function matches a cooling level with a frequency. The cooling level
+ * equals the cooling state of the cpu cooling device; given that level, it
+ * returns the corresponding frequency.
+ * e.g. level=0 --> 1st MAX FREQ, level=1 --> 2nd MAX FREQ, ... etc.
+ *
+ * Return: 0 on error, the corresponding frequency otherwise.
+ */
+static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
+{
+ int ret = 0;
+ unsigned int freq;
+
+ ret = get_property(cpu, level, &freq, GET_FREQ);
+ if (ret)
+ return 0;
+
+ return freq;
}
/**
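
get_property() above folds three lookups (max level, frequency-to-level and
level-to-frequency) into one walk of the cpufreq table, with cooling level 0
always meaning the highest frequency. A standalone sketch of that index
convention, assuming a four-entry table (the values are made up):

	#include <stdio.h>

	static const unsigned int table_desc[] = { 1200000, 1000000, 800000, 600000 };
	static const unsigned int table_asc[]  = { 600000, 800000, 1000000, 1200000 };

	/* Mirrors the GET_FREQ path: level = descend ? input : (max_level - input - 1) */
	static unsigned int level_to_freq(const unsigned int *t, unsigned int n,
					  int descend, unsigned int level)
	{
		unsigned int idx = descend ? level : (n - level - 1);

		return t[idx];
	}

	int main(void)
	{
		printf("descending, level 1 -> %u kHz\n",
		       level_to_freq(table_desc, 4, 1, 1));	/* 1000000 */
		printf("ascending,  level 1 -> %u kHz\n",
		       level_to_freq(table_asc, 4, 0, 1));	/* 1000000 */
		return 0;
	}
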
@@ -163,13 +262,19 @@ static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
* @cpufreq_device: cpufreq_cooling_device pointer containing frequency
* clipping data.
* @cooling_state: value of the cooling state.
+ *
+ * Function used to make sure the cpufreq layer is aware of current thermal
+ * limits. The limits are applied by updating the cpufreq policy.
+ *
+ * Return: 0 on success, an error code otherwise (-EINVAL in case wrong
+ * cooling state).
*/
static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
- unsigned long cooling_state)
+ unsigned long cooling_state)
{
unsigned int cpuid, clip_freq;
- struct cpumask *maskPtr = &cpufreq_device->allowed_cpus;
- unsigned int cpu = cpumask_any(maskPtr);
+ struct cpumask *mask = &cpufreq_device->allowed_cpus;
+ unsigned int cpu = cpumask_any(mask);
/* Check if the old cooling action is same as new cooling action */
@@ -184,7 +289,7 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
cpufreq_device->cpufreq_val = clip_freq;
notify_device = cpufreq_device;
- for_each_cpu(cpuid, maskPtr) {
+ for_each_cpu(cpuid, mask) {
if (is_cpufreq_valid(cpuid))
cpufreq_update_policy(cpuid);
}
@@ -199,9 +304,15 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
* @nb: struct notifier_block * with callback info.
* @event: value showing cpufreq event for which this function invoked.
* @data: callback-specific data
+ *
+ * Callback to hijack the notification on cpufreq policy transition.
+ * Every time there is a change in policy, we will intercept and
+ * update the cpufreq policy with thermal constraints.
+ *
+ * Return: 0 (success)
*/
static int cpufreq_thermal_notifier(struct notifier_block *nb,
- unsigned long event, void *data)
+ unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
unsigned long max_freq = 0;
@@ -212,7 +323,7 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
max_freq = notify_device->cpufreq_val;
- /* Never exceed user_policy.max*/
+ /* Never exceed user_policy.max */
if (max_freq > policy->user_policy.max)
max_freq = policy->user_policy.max;
@@ -222,50 +333,46 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
return 0;
}
-/*
- * cpufreq cooling device callback functions are defined below
- */
+/* cpufreq cooling device callback functions are defined below */
/**
* cpufreq_get_max_state - callback function to get the max cooling state.
* @cdev: thermal cooling device pointer.
* @state: fill this variable with the max cooling state.
+ *
+ * Callback for the thermal cooling device to return the cpufreq
+ * max cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
*/
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
- struct cpumask *maskPtr = &cpufreq_device->allowed_cpus;
+ struct cpumask *mask = &cpufreq_device->allowed_cpus;
unsigned int cpu;
- struct cpufreq_frequency_table *table;
- unsigned long count = 0;
- int i = 0;
-
- cpu = cpumask_any(maskPtr);
- table = cpufreq_frequency_get_table(cpu);
- if (!table) {
- *state = 0;
- return 0;
- }
+ unsigned int count = 0;
+ int ret;
- for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
- if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
- continue;
- count++;
- }
+ cpu = cpumask_any(mask);
- if (count > 0) {
- *state = --count;
- return 0;
- }
+ ret = get_property(cpu, 0, &count, GET_MAXL);
- return -EINVAL;
+ if (count > 0)
+ *state = count;
+
+ return ret;
}
/**
* cpufreq_get_cur_state - callback function to get the current cooling state.
* @cdev: thermal cooling device pointer.
* @state: fill this variable with the current cooling state.
+ *
+ * Callback for the thermal cooling device to return the cpufreq
+ * current cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
*/
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
@@ -273,6 +380,7 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
*state = cpufreq_device->cpufreq_state;
+
return 0;
}
@@ -280,6 +388,11 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
* cpufreq_set_cur_state - callback function to set the current cooling state.
* @cdev: thermal cooling device pointer.
* @state: set this variable to the current cooling state.
+ *
+ * Callback for the thermal cooling device to change the cpufreq
+ * current cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
*/
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
@@ -304,9 +417,16 @@ static struct notifier_block thermal_cpufreq_notifier_block = {
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
* @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ *
+ * This interface function registers the cpufreq cooling device with the name
+ * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
+ * cooling devices.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
*/
-struct thermal_cooling_device *cpufreq_cooling_register(
- const struct cpumask *clip_cpus)
+struct thermal_cooling_device *
+cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
struct thermal_cooling_device *cool_dev;
struct cpufreq_cooling_device *cpufreq_dev = NULL;
@@ -315,9 +435,9 @@ struct thermal_cooling_device *cpufreq_cooling_register(
int ret = 0, i;
struct cpufreq_policy policy;
- /*Verify that all the clip cpus have same freq_min, freq_max limit*/
+ /* Verify that all the clip cpus have same freq_min, freq_max limit */
for_each_cpu(i, clip_cpus) {
- /*continue if cpufreq policy not found and not return error*/
+ /* continue if cpufreq policy not found and not return error */
if (!cpufreq_get_policy(&policy, i))
continue;
if (min == 0 && max == 0) {
@@ -325,12 +445,12 @@ struct thermal_cooling_device *cpufreq_cooling_register(
max = policy.cpuinfo.max_freq;
} else {
if (min != policy.cpuinfo.min_freq ||
- max != policy.cpuinfo.max_freq)
+ max != policy.cpuinfo.max_freq)
return ERR_PTR(-EINVAL);
}
}
cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!cpufreq_dev)
return ERR_PTR(-ENOMEM);
@@ -342,10 +462,11 @@ struct thermal_cooling_device *cpufreq_cooling_register(
return ERR_PTR(-EINVAL);
}
- sprintf(dev_name, "thermal-cpufreq-%d", cpufreq_dev->id);
+ snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
+ cpufreq_dev->id);
cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
- &cpufreq_cooling_ops);
+ &cpufreq_cooling_ops);
if (!cool_dev) {
release_idr(&cpufreq_idr, cpufreq_dev->id);
kfree(cpufreq_dev);
@@ -358,17 +479,20 @@ struct thermal_cooling_device *cpufreq_cooling_register(
/* Register the notifier for first cpufreq cooling device */
if (cpufreq_dev_count == 0)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
- CPUFREQ_POLICY_NOTIFIER);
+ CPUFREQ_POLICY_NOTIFIER);
cpufreq_dev_count++;
mutex_unlock(&cooling_cpufreq_lock);
+
return cool_dev;
}
-EXPORT_SYMBOL(cpufreq_cooling_register);
+EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
+ *
+ * This interface function unregisters the "thermal-cpufreq-%x" cooling device.
*/
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
@@ -378,14 +502,13 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
cpufreq_dev_count--;
/* Unregister the notifier for the last cpufreq cooling device */
- if (cpufreq_dev_count == 0) {
+ if (cpufreq_dev_count == 0)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
- CPUFREQ_POLICY_NOTIFIER);
- }
+ CPUFREQ_POLICY_NOTIFIER);
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
release_idr(&cpufreq_idr, cpufreq_dev->id);
kfree(cpufreq_dev);
}
-EXPORT_SYMBOL(cpufreq_cooling_unregister);
+EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
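
With the register/unregister hunks above exporting GPL-only symbols and always
returning an ERR_PTR() on failure, a platform driver consumes the API roughly
as sketched below (function and variable names are illustrative, mirroring the
db8500 change that follows):

	#include <linux/cpumask.h>
	#include <linux/cpu_cooling.h>
	#include <linux/err.h>

	static struct thermal_cooling_device *example_cdev;

	static int example_register_cooling(void)
	{
		struct cpumask mask;

		cpumask_clear(&mask);
		cpumask_set_cpu(0, &mask);

		example_cdev = cpufreq_cooling_register(&mask);
		if (IS_ERR(example_cdev))
			return PTR_ERR(example_cdev);

		return 0;
	}

	static void example_unregister_cooling(void)
	{
		cpufreq_cooling_unregister(example_cdev);
	}
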
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 2141985..786d192 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -37,7 +37,7 @@ static int db8500_cpufreq_cooling_probe(struct platform_device *pdev)
cpumask_set_cpu(0, &mask_val);
cdev = cpufreq_cooling_register(&mask_val);
- if (IS_ERR_OR_NULL(cdev)) {
+ if (IS_ERR(cdev)) {
dev_err(&pdev->dev, "Failed to register cooling device\n");
return PTR_ERR(cdev);
}
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 61ce60a..1e3b3bf 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -419,7 +419,8 @@ static int db8500_thermal_probe(struct platform_device *pdev)
low_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW");
if (low_irq < 0) {
dev_err(&pdev->dev, "Get IRQ_HOTMON_LOW failed.\n");
- return low_irq;
+ ret = low_irq;
+ goto out_unlock;
}
ret = devm_request_threaded_irq(&pdev->dev, low_irq, NULL,
@@ -427,13 +428,14 @@ static int db8500_thermal_probe(struct platform_device *pdev)
"dbx500_temp_low", pzone);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to allocate temp low irq.\n");
- return ret;
+ goto out_unlock;
}
high_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH");
if (high_irq < 0) {
dev_err(&pdev->dev, "Get IRQ_HOTMON_HIGH failed.\n");
- return high_irq;
+ ret = high_irq;
+ goto out_unlock;
}
ret = devm_request_threaded_irq(&pdev->dev, high_irq, NULL,
@@ -441,15 +443,16 @@ static int db8500_thermal_probe(struct platform_device *pdev)
"dbx500_temp_high", pzone);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to allocate temp high irq.\n");
- return ret;
+ goto out_unlock;
}
pzone->therm_dev = thermal_zone_device_register("db8500_thermal_zone",
ptrips->num_trips, 0, pzone, &thdev_ops, NULL, 0, 0);
- if (IS_ERR_OR_NULL(pzone->therm_dev)) {
+ if (IS_ERR(pzone->therm_dev)) {
dev_err(&pdev->dev, "Register thermal zone device failed.\n");
- return PTR_ERR(pzone->therm_dev);
+ ret = PTR_ERR(pzone->therm_dev);
+ goto out_unlock;
}
dev_info(&pdev->dev, "Thermal zone device registered.\n");
@@ -461,9 +464,11 @@ static int db8500_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pzone);
pzone->mode = THERMAL_DEVICE_ENABLED;
+
+out_unlock:
mutex_unlock(&pzone->th_lock);
- return 0;
+ return ret;
}
static int db8500_thermal_remove(struct platform_device *pdev)
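
The db8500 probe changes above route every failure taken after the zone mutex
is held through a single out_unlock label. The general shape of that pattern,
reduced to a self-contained sketch (the helpers are hypothetical stand-ins for
the irq and zone registration steps):

	#include <linux/mutex.h>
	#include <linux/errno.h>

	static DEFINE_MUTEX(example_lock);

	static int example_request_irqs(void)  { return 0; }	/* hypothetical */
	static int example_register_zone(void) { return 0; }	/* hypothetical */

	static int example_probe(void)
	{
		int ret = 0;

		mutex_lock(&example_lock);

		ret = example_request_irqs();
		if (ret < 0)
			goto out_unlock;

		ret = example_register_zone();
		if (ret < 0)
			goto out_unlock;

	out_unlock:
		mutex_unlock(&example_lock);
		return ret;
	}
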
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index 3078c40..4b15a5f 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -107,12 +107,13 @@ static int dove_get_temp(struct thermal_zone_device *thermal,
}
/*
- * Calculate temperature. See Section 8.10.1 of 88AP510,
- * Documentation/arm/Marvell/README
+ * Calculate temperature. According to Marvell internal
+ * documentation the formula for this is:
+ * Celsius = (322-reg)/1.3625
*/
reg = readl_relaxed(priv->sensor);
reg = (reg >> DOVE_THERMAL_TEMP_OFFSET) & DOVE_THERMAL_TEMP_MASK;
- *temp = ((2281638UL - (7298*reg)) / 10);
+ *temp = ((3220000000UL - (10000000UL * reg)) / 13625);
return 0;
}
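
The dove and kirkwood conversions both implement the documented formula
Celsius = (322 - reg) / 1.3625 in millicelsius-scaled integer math. As a
hypothetical check, a raw field of reg = 200 gives
(3220000000 - 10000000 * 200) / 13625 = 1220000000 / 13625 = 89541 mC,
i.e. roughly 89.5 degrees C, matching (322 - 200) / 1.3625 evaluated directly.
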
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c
index b777ae6..d20ce9e 100644
--- a/drivers/thermal/exynos_thermal.c
+++ b/drivers/thermal/exynos_thermal.c
@@ -98,13 +98,13 @@
#define IDLE_INTERVAL 10000
#define MCELSIUS 1000
-#ifdef CONFIG_EXYNOS_THERMAL_EMUL
+#ifdef CONFIG_THERMAL_EMULATION
#define EXYNOS_EMUL_TIME 0x57F0
#define EXYNOS_EMUL_TIME_SHIFT 16
#define EXYNOS_EMUL_DATA_SHIFT 8
#define EXYNOS_EMUL_DATA_MASK 0xFF
#define EXYNOS_EMUL_ENABLE 0x1
-#endif /* CONFIG_EXYNOS_THERMAL_EMUL */
+#endif /* CONFIG_THERMAL_EMULATION */
/* CPU Zone information */
#define PANIC_ZONE 4
@@ -143,6 +143,7 @@ struct thermal_cooling_conf {
struct thermal_sensor_conf {
char name[SENSOR_NAME_LEN];
int (*read_temperature)(void *data);
+ int (*write_emul_temp)(void *drv_data, unsigned long temp);
struct thermal_trip_point_conf trip_data;
struct thermal_cooling_conf cooling_data;
void *private_data;
@@ -240,26 +241,6 @@ static int exynos_get_crit_temp(struct thermal_zone_device *thermal,
return ret;
}
-static int exynos_get_frequency_level(unsigned int cpu, unsigned int freq)
-{
- int i = 0, ret = -EINVAL;
- struct cpufreq_frequency_table *table = NULL;
-#ifdef CONFIG_CPU_FREQ
- table = cpufreq_frequency_get_table(cpu);
-#endif
- if (!table)
- return ret;
-
- while (table[i].frequency != CPUFREQ_TABLE_END) {
- if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
- continue;
- if (table[i].frequency == freq)
- return i;
- i++;
- }
- return ret;
-}
-
/* Bind callback functions for thermal zone */
static int exynos_bind(struct thermal_zone_device *thermal,
struct thermal_cooling_device *cdev)
@@ -286,8 +267,8 @@ static int exynos_bind(struct thermal_zone_device *thermal,
/* Bind the thermal zone to the cpufreq cooling device */
for (i = 0; i < tab_size; i++) {
clip_data = (struct freq_clip_table *)&(tab_ptr[i]);
- level = exynos_get_frequency_level(0, clip_data->freq_clip_max);
- if (level < 0)
+ level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max);
+ if (level == THERMAL_CSTATE_INVALID)
return 0;
switch (GET_ZONE(i)) {
case MONITOR_ZONE:
@@ -367,6 +348,23 @@ static int exynos_get_temp(struct thermal_zone_device *thermal,
return 0;
}
+/* Set emulation temperature callback function for thermal zone */
+static int exynos_set_emul_temp(struct thermal_zone_device *thermal,
+ unsigned long temp)
+{
+ void *data;
+ int ret = -EINVAL;
+
+ if (!th_zone->sensor_conf) {
+ pr_info("Temperature sensor not initialised\n");
+ return -EINVAL;
+ }
+ data = th_zone->sensor_conf->private_data;
+ if (th_zone->sensor_conf->write_emul_temp)
+ ret = th_zone->sensor_conf->write_emul_temp(data, temp);
+ return ret;
+}
+
/* Get the temperature trend */
static int exynos_get_trend(struct thermal_zone_device *thermal,
int trip, enum thermal_trend *trend)
@@ -390,6 +388,7 @@ static struct thermal_zone_device_ops const exynos_dev_ops = {
.bind = exynos_bind,
.unbind = exynos_unbind,
.get_temp = exynos_get_temp,
+ .set_emul_temp = exynos_set_emul_temp,
.get_trend = exynos_get_trend,
.get_mode = exynos_get_mode,
.set_mode = exynos_set_mode,
@@ -712,6 +711,47 @@ static int exynos_tmu_read(struct exynos_tmu_data *data)
return temp;
}
+#ifdef CONFIG_THERMAL_EMULATION
+static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+{
+ struct exynos_tmu_data *data = drv_data;
+ unsigned int reg;
+ int ret = -EINVAL;
+
+ if (data->soc == SOC_ARCH_EXYNOS4210)
+ goto out;
+
+ if (temp && temp < MCELSIUS)
+ goto out;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ reg = readl(data->base + EXYNOS_EMUL_CON);
+
+ if (temp) {
+ temp /= MCELSIUS;
+
+ reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
+ (temp_to_code(data, temp)
+ << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
+ } else {
+ reg &= ~EXYNOS_EMUL_ENABLE;
+ }
+
+ writel(reg, data->base + EXYNOS_EMUL_CON);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+ return 0;
+out:
+ return ret;
+}
+#else
+static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+ { return -EINVAL; }
+#endif/*CONFIG_THERMAL_EMULATION*/
+
static void exynos_tmu_work(struct work_struct *work)
{
struct exynos_tmu_data *data = container_of(work,
@@ -745,6 +785,7 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id)
static struct thermal_sensor_conf exynos_sensor_conf = {
.name = "exynos-therm",
.read_temperature = (int (*)(void *))exynos_tmu_read,
+ .write_emul_temp = exynos_tmu_set_emulation,
};
#if defined(CONFIG_CPU_EXYNOS4210)
@@ -814,6 +855,10 @@ static const struct of_device_id exynos_tmu_match[] = {
.data = (void *)EXYNOS4210_TMU_DRV_DATA,
},
{
+ .compatible = "samsung,exynos4412-tmu",
+ .data = (void *)EXYNOS_TMU_DRV_DATA,
+ },
+ {
.compatible = "samsung,exynos5250-tmu",
.data = (void *)EXYNOS_TMU_DRV_DATA,
},
@@ -851,93 +896,6 @@ static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
platform_get_device_id(pdev)->driver_data;
}
-#ifdef CONFIG_EXYNOS_THERMAL_EMUL
-static ssize_t exynos_tmu_emulation_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct platform_device *pdev = container_of(dev,
- struct platform_device, dev);
- struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- unsigned int reg;
- u8 temp_code;
- int temp = 0;
-
- if (data->soc == SOC_ARCH_EXYNOS4210)
- goto out;
-
- mutex_lock(&data->lock);
- clk_enable(data->clk);
- reg = readl(data->base + EXYNOS_EMUL_CON);
- clk_disable(data->clk);
- mutex_unlock(&data->lock);
-
- if (reg & EXYNOS_EMUL_ENABLE) {
- reg >>= EXYNOS_EMUL_DATA_SHIFT;
- temp_code = reg & EXYNOS_EMUL_DATA_MASK;
- temp = code_to_temp(data, temp_code);
- }
-out:
- return sprintf(buf, "%d\n", temp * MCELSIUS);
-}
-
-static ssize_t exynos_tmu_emulation_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct platform_device *pdev = container_of(dev,
- struct platform_device, dev);
- struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- unsigned int reg;
- int temp;
-
- if (data->soc == SOC_ARCH_EXYNOS4210)
- goto out;
-
- if (!sscanf(buf, "%d\n", &temp) || temp < 0)
- return -EINVAL;
-
- mutex_lock(&data->lock);
- clk_enable(data->clk);
-
- reg = readl(data->base + EXYNOS_EMUL_CON);
-
- if (temp) {
- /* Both CELSIUS and MCELSIUS type are available for input */
- if (temp > MCELSIUS)
- temp /= MCELSIUS;
-
- reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
- (temp_to_code(data, (temp / MCELSIUS))
- << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
- } else {
- reg &= ~EXYNOS_EMUL_ENABLE;
- }
-
- writel(reg, data->base + EXYNOS_EMUL_CON);
-
- clk_disable(data->clk);
- mutex_unlock(&data->lock);
-
-out:
- return count;
-}
-
-static DEVICE_ATTR(emulation, 0644, exynos_tmu_emulation_show,
- exynos_tmu_emulation_store);
-static int create_emulation_sysfs(struct device *dev)
-{
- return device_create_file(dev, &dev_attr_emulation);
-}
-static void remove_emulation_sysfs(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_emulation);
-}
-#else
-static inline int create_emulation_sysfs(struct device *dev) { return 0; }
-static inline void remove_emulation_sysfs(struct device *dev) {}
-#endif
-
static int exynos_tmu_probe(struct platform_device *pdev)
{
struct exynos_tmu_data *data;
@@ -983,12 +941,16 @@ static int exynos_tmu_probe(struct platform_device *pdev)
return ret;
}
- data->clk = clk_get(NULL, "tmu_apbif");
+ data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
if (IS_ERR(data->clk)) {
dev_err(&pdev->dev, "Failed to get clock\n");
return PTR_ERR(data->clk);
}
+ ret = clk_prepare(data->clk);
+ if (ret)
+ return ret;
+
if (pdata->type == SOC_ARCH_EXYNOS ||
pdata->type == SOC_ARCH_EXYNOS4210)
data->soc = pdata->type;
@@ -1037,14 +999,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
goto err_clk;
}
- ret = create_emulation_sysfs(&pdev->dev);
- if (ret)
- dev_err(&pdev->dev, "Failed to create emulation mode sysfs node\n");
-
return 0;
err_clk:
platform_set_drvdata(pdev, NULL);
- clk_put(data->clk);
+ clk_unprepare(data->clk);
return ret;
}
@@ -1052,13 +1010,11 @@ static int exynos_tmu_remove(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- remove_emulation_sysfs(&pdev->dev);
-
exynos_tmu_control(pdev, false);
exynos_unregister_thermal();
- clk_put(data->clk);
+ clk_unprepare(data->clk);
platform_set_drvdata(pdev, NULL);
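
The new exynos_tmu_set_emulation() packs the sampling delay and the emulated
temperature code into EXYNOS_EMUL_CON with a single write. A standalone sketch
of the bit packing, reusing the shift/mask constants from the hunk above (the
temp_to_code() result of 0x40 is hypothetical):

	#include <stdio.h>

	#define EXYNOS_EMUL_TIME	0x57F0
	#define EXYNOS_EMUL_TIME_SHIFT	16
	#define EXYNOS_EMUL_DATA_SHIFT	8
	#define EXYNOS_EMUL_DATA_MASK	0xFF
	#define EXYNOS_EMUL_ENABLE	0x1

	int main(void)
	{
		unsigned int code = 0x40;	/* hypothetical temp_to_code() output */
		unsigned int reg;

		reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
		      ((code & EXYNOS_EMUL_DATA_MASK) << EXYNOS_EMUL_DATA_SHIFT) |
		      EXYNOS_EMUL_ENABLE;

		printf("EMUL_CON = 0x%08x\n", reg);	/* 0x57f04001 */
		return 0;
	}
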
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index 792479f..944ba2f 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -22,9 +22,6 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
#include <linux/thermal.h>
#include "thermal_core.h"
@@ -111,23 +108,15 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
static struct thermal_governor thermal_gov_fair_share = {
.name = "fair_share",
.throttle = fair_share_throttle,
- .owner = THIS_MODULE,
};
-static int __init thermal_gov_fair_share_init(void)
+int thermal_gov_fair_share_register(void)
{
return thermal_register_governor(&thermal_gov_fair_share);
}
-static void __exit thermal_gov_fair_share_exit(void)
+void thermal_gov_fair_share_unregister(void)
{
thermal_unregister_governor(&thermal_gov_fair_share);
}
-/* This should load after thermal framework */
-fs_initcall(thermal_gov_fair_share_init);
-module_exit(thermal_gov_fair_share_exit);
-
-MODULE_AUTHOR("Durgadoss R");
-MODULE_DESCRIPTION("A simple weight based thermal throttling governor");
-MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
index e5500ed..dfeceaf 100644
--- a/drivers/thermal/kirkwood_thermal.c
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -41,21 +41,21 @@ static int kirkwood_get_temp(struct thermal_zone_device *thermal,
reg = readl_relaxed(priv->sensor);
/* Valid check */
- if (!(reg >> KIRKWOOD_THERMAL_VALID_OFFSET) &
- KIRKWOOD_THERMAL_VALID_MASK) {
+ if (!((reg >> KIRKWOOD_THERMAL_VALID_OFFSET) &
+ KIRKWOOD_THERMAL_VALID_MASK)) {
dev_err(&thermal->device,
"Temperature sensor reading not valid\n");
return -EIO;
}
/*
- * Calculate temperature. See Section 8.10.1 of the 88AP510,
- * datasheet, which has the same sensor.
- * Documentation/arm/Marvell/README
+ * Calculate temperature. According to Marvell internal
+ * documentation the formula for this is:
+ * Celsius = (322-reg)/1.3625
*/
reg = (reg >> KIRKWOOD_THERMAL_TEMP_OFFSET) &
KIRKWOOD_THERMAL_TEMP_MASK;
- *temp = ((2281638UL - (7298*reg)) / 10);
+ *temp = ((3220000000UL - (10000000UL * reg)) / 13625);
return 0;
}
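
The kirkwood validity fix above is purely about precedence: ! binds tighter
than &, so the old expression negated the shifted register before masking and
could accept a reading whose valid bit was clear. A small illustration with
hypothetical offset/mask values:

	#include <stdio.h>

	#define VALID_OFFSET	9	/* hypothetical bit position */
	#define VALID_MASK	0x1

	int main(void)
	{
		/* valid bit clear, but a higher field happens to be non-zero */
		unsigned long reg = 1UL << (VALID_OFFSET + 3);

		/* old form: !(reg >> VALID_OFFSET) & VALID_MASK -> !(8) & 1 -> 0,
		 * so the bogus reading would have been treated as valid */
		printf("old check flags error: %d\n",
		       !(reg >> VALID_OFFSET) & VALID_MASK);

		/* fixed form: !((reg >> VALID_OFFSET) & VALID_MASK) -> !(0) -> 1,
		 * so the reading is correctly rejected */
		printf("new check flags error: %d\n",
		       !((reg >> VALID_OFFSET) & VALID_MASK));

		return 0;
	}
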
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 2cc5b61..8d7edd4 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -377,6 +378,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
spin_lock_init(&common->lock);
common->dev = dev;
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq) {
int ret;
@@ -419,12 +423,15 @@ static int rcar_thermal_probe(struct platform_device *pdev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(dev, "Could not allocate priv\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error_unregister;
}
priv->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto error_unregister;
+ }
priv->common = common;
priv->id = i;
@@ -443,10 +450,10 @@ static int rcar_thermal_probe(struct platform_device *pdev)
goto error_unregister;
}
- list_move_tail(&priv->list, &common->head);
-
if (rcar_has_irq_support(priv))
rcar_thermal_irq_enable(priv);
+
+ list_move_tail(&priv->list, &common->head);
}
platform_set_drvdata(pdev, common);
@@ -456,8 +463,14 @@ static int rcar_thermal_probe(struct platform_device *pdev)
return 0;
error_unregister:
- rcar_thermal_for_each_priv(priv, common)
+ rcar_thermal_for_each_priv(priv, common) {
thermal_zone_device_unregister(priv->zone);
+ if (rcar_has_irq_support(priv))
+ rcar_thermal_irq_disable(priv);
+ }
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
@@ -465,13 +478,20 @@ error_unregister:
static int rcar_thermal_remove(struct platform_device *pdev)
{
struct rcar_thermal_common *common = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
struct rcar_thermal_priv *priv;
- rcar_thermal_for_each_priv(priv, common)
+ rcar_thermal_for_each_priv(priv, common) {
thermal_zone_device_unregister(priv->zone);
+ if (rcar_has_irq_support(priv))
+ rcar_thermal_irq_disable(priv);
+ }
platform_set_drvdata(pdev, NULL);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
return 0;
}
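
The rcar changes above take a runtime-PM reference as soon as probe starts and
drop it on every failure path as well as in remove. Reduced to its core, the
pairing looks like this (the setup/teardown helpers are hypothetical):

	#include <linux/pm_runtime.h>
	#include <linux/platform_device.h>

	static int example_setup(struct platform_device *pdev)     { return 0; }
	static void example_teardown(struct platform_device *pdev) { }

	static int example_probe(struct platform_device *pdev)
	{
		int ret;

		pm_runtime_enable(&pdev->dev);
		pm_runtime_get_sync(&pdev->dev);

		ret = example_setup(pdev);
		if (ret)
			goto error;

		return 0;

	error:
		pm_runtime_put_sync(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	static int example_remove(struct platform_device *pdev)
	{
		example_teardown(pdev);
		pm_runtime_put_sync(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return 0;
	}
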
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 407cde3..4d4ddae 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -22,9 +22,6 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
#include <linux/thermal.h>
#include "thermal_core.h"
@@ -59,9 +56,12 @@ static unsigned long get_target_state(struct thermal_instance *instance,
switch (trend) {
case THERMAL_TREND_RAISING:
- if (throttle)
+ if (throttle) {
cur_state = cur_state < instance->upper ?
(cur_state + 1) : instance->upper;
+ if (cur_state < instance->lower)
+ cur_state = instance->lower;
+ }
break;
case THERMAL_TREND_RAISE_FULL:
if (throttle)
@@ -71,8 +71,11 @@ static unsigned long get_target_state(struct thermal_instance *instance,
if (cur_state == instance->lower) {
if (!throttle)
cur_state = -1;
- } else
+ } else {
cur_state -= 1;
+ if (cur_state > instance->upper)
+ cur_state = instance->upper;
+ }
break;
case THERMAL_TREND_DROP_FULL:
if (cur_state == instance->lower) {
@@ -180,23 +183,14 @@ static int step_wise_throttle(struct thermal_zone_device *tz, int trip)
static struct thermal_governor thermal_gov_step_wise = {
.name = "step_wise",
.throttle = step_wise_throttle,
- .owner = THIS_MODULE,
};
-static int __init thermal_gov_step_wise_init(void)
+int thermal_gov_step_wise_register(void)
{
return thermal_register_governor(&thermal_gov_step_wise);
}
-static void __exit thermal_gov_step_wise_exit(void)
+void thermal_gov_step_wise_unregister(void)
{
thermal_unregister_governor(&thermal_gov_step_wise);
}
-
-/* This should load after thermal framework */
-fs_initcall(thermal_gov_step_wise_init);
-module_exit(thermal_gov_step_wise_exit);
-
-MODULE_AUTHOR("Durgadoss R");
-MODULE_DESCRIPTION("A step-by-step thermal throttling governor");
-MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_core.c
index 5b7863a..d755440 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_core.c
@@ -40,7 +40,7 @@
MODULE_AUTHOR("Zhang Rui");
MODULE_DESCRIPTION("Generic thermal management sysfs support");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
static DEFINE_IDR(thermal_tz_idr);
static DEFINE_IDR(thermal_cdev_idr);
@@ -99,7 +99,6 @@ int thermal_register_governor(struct thermal_governor *governor)
return err;
}
-EXPORT_SYMBOL_GPL(thermal_register_governor);
void thermal_unregister_governor(struct thermal_governor *governor)
{
@@ -127,7 +126,6 @@ exit:
mutex_unlock(&thermal_governor_lock);
return;
}
-EXPORT_SYMBOL_GPL(thermal_unregister_governor);
static int get_idr(struct idr *idr, struct mutex *lock, int *id)
{
@@ -371,16 +369,28 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
monitor_thermal_zone(tz);
}
-static int thermal_zone_get_temp(struct thermal_zone_device *tz,
- unsigned long *temp)
+/**
+ * thermal_zone_get_temp() - returns the temperature of a given thermal zone
+ * @tz: a valid pointer to a struct thermal_zone_device
+ * @temp: a valid pointer to where to store the resulting temperature.
+ *
+ * When a valid thermal zone reference is passed, it will fetch its
+ * temperature and fill @temp.
+ *
+ * Return: On success returns 0, an error code otherwise
+ */
+int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
{
- int ret = 0;
+ int ret = -EINVAL;
#ifdef CONFIG_THERMAL_EMULATION
int count;
unsigned long crit_temp = -1UL;
enum thermal_trip_type type;
#endif
+ if (!tz || IS_ERR(tz))
+ goto exit;
+
mutex_lock(&tz->lock);
ret = tz->ops->get_temp(tz, temp);
@@ -404,8 +414,10 @@ static int thermal_zone_get_temp(struct thermal_zone_device *tz,
skip_emul:
#endif
mutex_unlock(&tz->lock);
+exit:
return ret;
}
+EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
static void update_temperature(struct thermal_zone_device *tz)
{
@@ -434,7 +446,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
for (count = 0; count < tz->trips; count++)
handle_thermal_trip(tz, count);
}
-EXPORT_SYMBOL(thermal_zone_device_update);
+EXPORT_SYMBOL_GPL(thermal_zone_device_update);
static void thermal_zone_device_check(struct work_struct *work)
{
@@ -1097,13 +1109,23 @@ thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
#endif
/**
- * thermal_zone_bind_cooling_device - bind a cooling device to a thermal zone
- * @tz: thermal zone device
+ * thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone
+ * @tz: pointer to struct thermal_zone_device
* @trip: indicates which trip point the cooling devices is
* associated with in this thermal zone.
- * @cdev: thermal cooling device
+ * @cdev: pointer to struct thermal_cooling_device
+ * @upper: the maximum cooling state for this trip point.
+ * THERMAL_NO_LIMIT means no upper limit,
+ * and the cooling device can be in max_state.
+ * @lower: the minimum cooling state that can be used for this trip point.
+ * THERMAL_NO_LIMIT means no lower limit,
+ * and the cooling device can be in cooling state 0.
*
+ * This interface function binds a thermal cooling device to a particular trip
+ * point of a thermal zone device.
* This function is usually called in the thermal zone device .bind callback.
+ *
+ * Return: 0 on success, the proper error value otherwise.
*/
int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
int trip,
@@ -1197,16 +1219,21 @@ free_mem:
kfree(dev);
return result;
}
-EXPORT_SYMBOL(thermal_zone_bind_cooling_device);
+EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device);
/**
- * thermal_zone_unbind_cooling_device - unbind a cooling device from a thermal zone
- * @tz: thermal zone device
+ * thermal_zone_unbind_cooling_device() - unbind a cooling device from a
+ * thermal zone.
+ * @tz: pointer to a struct thermal_zone_device.
* @trip: indicates which trip point the cooling devices is
* associated with in this thermal zone.
- * @cdev: thermal cooling device
+ * @cdev: pointer to a struct thermal_cooling_device.
*
+ * This interface function unbinds a thermal cooling device from a particular
+ * trip point of a thermal zone device.
* This function is usually called in the thermal zone device .unbind callback.
+ *
+ * Return: 0 on success, the proper error value otherwise.
*/
int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
int trip,
@@ -1237,7 +1264,7 @@ unbind:
kfree(pos);
return 0;
}
-EXPORT_SYMBOL(thermal_zone_unbind_cooling_device);
+EXPORT_SYMBOL_GPL(thermal_zone_unbind_cooling_device);
static void thermal_release(struct device *dev)
{
@@ -1260,10 +1287,17 @@ static struct class thermal_class = {
};
/**
- * thermal_cooling_device_register - register a new thermal cooling device
+ * thermal_cooling_device_register() - register a new thermal cooling device
* @type: the thermal cooling device type.
* @devdata: device private data.
* @ops: standard thermal cooling devices callbacks.
+ *
+ * This interface function adds a new thermal cooling device (fan/processor/...)
+ * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
+ * to all the thermal zone devices registered at the same time.
+ *
+ * Return: a pointer to the created struct thermal_cooling_device or an
+ * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
*/
struct thermal_cooling_device *
thermal_cooling_device_register(char *type, void *devdata,
@@ -1289,7 +1323,7 @@ thermal_cooling_device_register(char *type, void *devdata,
return ERR_PTR(result);
}
- strcpy(cdev->type, type ? : "");
+ strlcpy(cdev->type, type ? : "", sizeof(cdev->type));
mutex_init(&cdev->lock);
INIT_LIST_HEAD(&cdev->thermal_instances);
cdev->ops = ops;
@@ -1334,7 +1368,7 @@ unregister:
device_unregister(&cdev->device);
return ERR_PTR(result);
}
-EXPORT_SYMBOL(thermal_cooling_device_register);
+EXPORT_SYMBOL_GPL(thermal_cooling_device_register);
/**
* thermal_cooling_device_unregister - removes the registered thermal cooling device
@@ -1394,7 +1428,7 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
device_unregister(&cdev->device);
return;
}
-EXPORT_SYMBOL(thermal_cooling_device_unregister);
+EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
void thermal_cdev_update(struct thermal_cooling_device *cdev)
{
@@ -1420,7 +1454,7 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
EXPORT_SYMBOL(thermal_cdev_update);
/**
- * notify_thermal_framework - Sensor drivers use this API to notify framework
+ * thermal_notify_framework - Sensor drivers use this API to notify framework
* @tz: thermal zone device
* @trip: indicates which trip point has been crossed
*
@@ -1431,16 +1465,21 @@ EXPORT_SYMBOL(thermal_cdev_update);
* The throttling policy is based on the configured platform data; if no
* platform data is provided, this uses the step_wise throttling policy.
*/
-void notify_thermal_framework(struct thermal_zone_device *tz, int trip)
+void thermal_notify_framework(struct thermal_zone_device *tz, int trip)
{
handle_thermal_trip(tz, trip);
}
-EXPORT_SYMBOL(notify_thermal_framework);
+EXPORT_SYMBOL_GPL(thermal_notify_framework);
/**
- * create_trip_attrs - create attributes for trip points
+ * create_trip_attrs() - create attributes for trip points
* @tz: the thermal zone device
* @mask: Writeable trip point bitmap.
+ *
+ * helper function to instantiate sysfs entries for every trip
+ * point and its properties of a struct thermal_zone_device.
+ *
+ * Return: 0 on success, the proper error value otherwise.
*/
static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
{
@@ -1541,7 +1580,7 @@ static void remove_trip_attrs(struct thermal_zone_device *tz)
}
/**
- * thermal_zone_device_register - register a new thermal zone device
+ * thermal_zone_device_register() - register a new thermal zone device
* @type: the thermal zone device type
* @trips: the number of trip points the thermal zone support
* @mask: a bit string indicating the writeablility of trip points
@@ -1554,8 +1593,15 @@ static void remove_trip_attrs(struct thermal_zone_device *tz)
* whether trip points have been crossed (0 for interrupt
* driven systems)
*
+ * This interface function adds a new thermal zone device (sensor) to
+ * /sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all the
+ * thermal cooling devices registered at the same time.
* thermal_zone_device_unregister() must be called when the device is no
* longer needed. The passive cooling depends on the .get_trend() return value.
+ *
+ * Return: a pointer to the created struct thermal_zone_device or an
+ * in case of error, an ERR_PTR. Caller must check return value with
+ * IS_ERR*() helpers.
*/
struct thermal_zone_device *thermal_zone_device_register(const char *type,
int trips, int mask, void *devdata,
@@ -1594,7 +1640,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
return ERR_PTR(result);
}
- strcpy(tz->type, type ? : "");
+ strlcpy(tz->type, type ? : "", sizeof(tz->type));
tz->ops = ops;
tz->tzp = tzp;
tz->device.class = &thermal_class;
@@ -1687,7 +1733,7 @@ unregister:
device_unregister(&tz->device);
return ERR_PTR(result);
}
-EXPORT_SYMBOL(thermal_zone_device_register);
+EXPORT_SYMBOL_GPL(thermal_zone_device_register);
/**
* thermal_device_unregister - removes the registered thermal zone device
@@ -1754,7 +1800,45 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
device_unregister(&tz->device);
return;
}
-EXPORT_SYMBOL(thermal_zone_device_unregister);
+EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
+
+/**
+ * thermal_zone_get_zone_by_name() - search for a zone and return its ref
+ * @name: thermal zone name to fetch the temperature
+ *
+ * When only one zone is found with the passed name, returns a reference to it.
+ *
+ * Return: On success returns a reference to the unique thermal zone with a
+ * name matching @name, an ERR_PTR otherwise (-EINVAL for invalid
+ * parameters, -ENODEV if no match is found and -EEXIST for multiple matches).
+ */
+struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name)
+{
+ struct thermal_zone_device *pos = NULL, *ref = ERR_PTR(-EINVAL);
+ unsigned int found = 0;
+
+ if (!name)
+ goto exit;
+
+ mutex_lock(&thermal_list_lock);
+ list_for_each_entry(pos, &thermal_tz_list, node)
+ if (!strnicmp(name, pos->type, THERMAL_NAME_LENGTH)) {
+ found++;
+ ref = pos;
+ }
+ mutex_unlock(&thermal_list_lock);
+
+ /* nothing has been found, thus an error code for it */
+ if (found == 0)
+ ref = ERR_PTR(-ENODEV);
+ else if (found > 1)
+ /* Success only when an unique zone is found */
+ ref = ERR_PTR(-EEXIST);
+
+exit:
+ return ref;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
#ifdef CONFIG_NET
static struct genl_family thermal_event_genl_family = {
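
Taken together, the newly exported thermal_zone_get_zone_by_name() and
thermal_zone_get_temp() let another kernel module read a zone it knows only by
name; a hedged sketch (the zone name is illustrative):

	#include <linux/thermal.h>
	#include <linux/err.h>

	static int example_read_zone_temp(unsigned long *temp)
	{
		struct thermal_zone_device *tz;

		tz = thermal_zone_get_zone_by_name("exynos-therm");
		if (IS_ERR(tz))
			return PTR_ERR(tz);

		return thermal_zone_get_temp(tz, temp);
	}
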
@@ -1832,7 +1916,7 @@ int thermal_generate_netlink_event(struct thermal_zone_device *tz,
return result;
}
-EXPORT_SYMBOL(thermal_generate_netlink_event);
+EXPORT_SYMBOL_GPL(thermal_generate_netlink_event);
static int genetlink_init(void)
{
@@ -1858,30 +1942,69 @@ static inline int genetlink_init(void) { return 0; }
static inline void genetlink_exit(void) {}
#endif /* !CONFIG_NET */
+static int __init thermal_register_governors(void)
+{
+ int result;
+
+ result = thermal_gov_step_wise_register();
+ if (result)
+ return result;
+
+ result = thermal_gov_fair_share_register();
+ if (result)
+ return result;
+
+ return thermal_gov_user_space_register();
+}
+
+static void thermal_unregister_governors(void)
+{
+ thermal_gov_step_wise_unregister();
+ thermal_gov_fair_share_unregister();
+ thermal_gov_user_space_unregister();
+}
+
static int __init thermal_init(void)
{
- int result = 0;
+ int result;
+
+ result = thermal_register_governors();
+ if (result)
+ goto error;
result = class_register(&thermal_class);
- if (result) {
- idr_destroy(&thermal_tz_idr);
- idr_destroy(&thermal_cdev_idr);
- mutex_destroy(&thermal_idr_lock);
- mutex_destroy(&thermal_list_lock);
- return result;
- }
+ if (result)
+ goto unregister_governors;
+
result = genetlink_init();
+ if (result)
+ goto unregister_class;
+
+ return 0;
+
+unregister_governors:
+ thermal_unregister_governors();
+unregister_class:
+ class_unregister(&thermal_class);
+error:
+ idr_destroy(&thermal_tz_idr);
+ idr_destroy(&thermal_cdev_idr);
+ mutex_destroy(&thermal_idr_lock);
+ mutex_destroy(&thermal_list_lock);
+ mutex_destroy(&thermal_governor_lock);
return result;
}
static void __exit thermal_exit(void)
{
+ genetlink_exit();
class_unregister(&thermal_class);
+ thermal_unregister_governors();
idr_destroy(&thermal_tz_idr);
idr_destroy(&thermal_cdev_idr);
mutex_destroy(&thermal_idr_lock);
mutex_destroy(&thermal_list_lock);
- genetlink_exit();
+ mutex_destroy(&thermal_governor_lock);
}
fs_initcall(thermal_init);
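
thermal_init() above now unwinds partially completed setup with gotos instead
of repeating the cleanup inline. The general shape of such an init ladder is
to undo only the steps that already succeeded, in reverse order; a minimal
sketch with hypothetical steps:

	#include <linux/init.h>
	#include <linux/errno.h>

	static int example_step_a(void)  { return 0; }	/* hypothetical */
	static int example_step_b(void)  { return 0; }	/* hypothetical */
	static void example_undo_a(void) { }

	static int __init example_init(void)
	{
		int ret;

		ret = example_step_a();
		if (ret)
			return ret;

		ret = example_step_b();
		if (ret)
			goto undo_a;

		return 0;

	undo_a:
		example_undo_a();
		return ret;
	}
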
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 0d3205a..7cf2f66 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -50,4 +50,31 @@ struct thermal_instance {
struct list_head cdev_node; /* node in cdev->thermal_instances */
};
+int thermal_register_governor(struct thermal_governor *);
+void thermal_unregister_governor(struct thermal_governor *);
+
+#ifdef CONFIG_THERMAL_GOV_STEP_WISE
+int thermal_gov_step_wise_register(void);
+void thermal_gov_step_wise_unregister(void);
+#else
+static inline int thermal_gov_step_wise_register(void) { return 0; }
+static inline void thermal_gov_step_wise_unregister(void) {}
+#endif /* CONFIG_THERMAL_GOV_STEP_WISE */
+
+#ifdef CONFIG_THERMAL_GOV_FAIR_SHARE
+int thermal_gov_fair_share_register(void);
+void thermal_gov_fair_share_unregister(void);
+#else
+static inline int thermal_gov_fair_share_register(void) { return 0; }
+static inline void thermal_gov_fair_share_unregister(void) {}
+#endif /* CONFIG_THERMAL_GOV_FAIR_SHARE */
+
+#ifdef CONFIG_THERMAL_GOV_USER_SPACE
+int thermal_gov_user_space_register(void);
+void thermal_gov_user_space_unregister(void);
+#else
+static inline int thermal_gov_user_space_register(void) { return 0; }
+static inline void thermal_gov_user_space_unregister(void) {}
+#endif /* CONFIG_THERMAL_GOV_USER_SPACE */
+
#endif /* __THERMAL_CORE_H__ */
diff --git a/drivers/thermal/user_space.c b/drivers/thermal/user_space.c
index 6bbb380..10adcdd 100644
--- a/drivers/thermal/user_space.c
+++ b/drivers/thermal/user_space.c
@@ -22,9 +22,6 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
#include <linux/thermal.h>
#include "thermal_core.h"
@@ -46,23 +43,15 @@ static int notify_user_space(struct thermal_zone_device *tz, int trip)
static struct thermal_governor thermal_gov_user_space = {
.name = "user_space",
.throttle = notify_user_space,
- .owner = THIS_MODULE,
};
-static int __init thermal_gov_user_space_init(void)
+int thermal_gov_user_space_register(void)
{
return thermal_register_governor(&thermal_gov_user_space);
}
-static void __exit thermal_gov_user_space_exit(void)
+void thermal_gov_user_space_unregister(void)
{
thermal_unregister_governor(&thermal_gov_user_space);
}
-/* This should load after thermal framework */
-fs_initcall(thermal_gov_user_space_init);
-module_exit(thermal_gov_user_space_exit);
-
-MODULE_AUTHOR("Durgadoss R");
-MODULE_DESCRIPTION("A user space Thermal notifier");
-MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index ef2e08e..5dc9c4b 100644
--- a/drivers/tty/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -14,7 +14,6 @@
* 2.4/2.5 port David McCullough
*/
-#include <asm/dbg.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/serial.h>
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 52a3ecd..6fa2ae7 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -30,7 +30,6 @@
#include <linux/serial.h>
#include <linux/serial_core.h>
-#include <bcm63xx_clk.h>
#include <bcm63xx_irq.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
index 1d215cd..b083a35 100644
--- a/drivers/usb/host/ehci-tilegx.c
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -118,8 +118,10 @@ static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev)
hcd = usb_create_hcd(&ehci_tilegx_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
- if (!hcd)
- return -ENOMEM;
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto err_hcd;
+ }
/*
* We don't use rsrc_start to map in our registers, but seems like
@@ -176,6 +178,7 @@ err_have_irq:
err_no_irq:
tilegx_stop_ehc();
usb_put_hcd(hcd);
+err_hcd:
gxio_usb_host_destroy(&pdata->usb_ctx);
return ret;
}
diff --git a/drivers/usb/host/ohci-tilegx.c b/drivers/usb/host/ohci-tilegx.c
index 1ae7b28..ea73009 100644
--- a/drivers/usb/host/ohci-tilegx.c
+++ b/drivers/usb/host/ohci-tilegx.c
@@ -112,8 +112,10 @@ static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev)
hcd = usb_create_hcd(&ohci_tilegx_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
- if (!hcd)
- return -ENOMEM;
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto err_hcd;
+ }
/*
* We don't use rsrc_start to map in our registers, but seems like
@@ -165,6 +167,7 @@ err_have_irq:
err_no_irq:
tilegx_stop_ohc();
usb_put_hcd(hcd);
+err_hcd:
gxio_usb_host_destroy(&pdata->usb_ctx);
return ret;
}
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index aab2ab2..371d0e7 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -128,7 +128,7 @@ config TWL6030_USB
config USB_GPIO_VBUS
tristate "GPIO based peripheral-only VBUS sensing 'transceiver'"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Provides simple GPIO VBUS sensing for controllers with an
internal transceiver via the usb_phy interface, and
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index c04ccdf..d71d60f 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2429,7 +2429,7 @@ config FB_MXS
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_MODE_HELPERS
- select OF_VIDEOMODE
+ select VIDEOMODE_HELPERS
help
Framebuffer support for the MXS SoC.
@@ -2483,7 +2483,7 @@ config FB_SSD1307
tristate "Solomon SSD1307 framebuffer support"
depends on FB && I2C
depends on OF
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select FB_SYS_FOPS
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index ddabaa8..700cac0 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -111,30 +111,16 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
switch (blank_mode) {
case VESA_NO_BLANKING:
- /* Turn on panel */
- fbdev->regs->lcd_control |= LCD_CONTROL_GO;
-#ifdef CONFIG_MIPS_PB1100
- if (fbdev->panel_idx == 1) {
- au_writew(au_readw(PB1100_G_CONTROL)
- | (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
- PB1100_G_CONTROL);
- }
-#endif
+ /* Turn on panel */
+ fbdev->regs->lcd_control |= LCD_CONTROL_GO;
au_sync();
break;
case VESA_VSYNC_SUSPEND:
case VESA_HSYNC_SUSPEND:
case VESA_POWERDOWN:
- /* Turn off panel */
- fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
-#ifdef CONFIG_MIPS_PB1100
- if (fbdev->panel_idx == 1) {
- au_writew(au_readw(PB1100_G_CONTROL)
- & ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
- PB1100_G_CONTROL);
- }
-#endif
+ /* Turn off panel */
+ fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
au_sync();
break;
default:
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 2e166c3..d5ab658 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -36,14 +36,14 @@ config LCD_CORGI
config LCD_L4F00242T03
tristate "Epson L4F00242T03 LCD"
- depends on SPI_MASTER && GENERIC_GPIO
+ depends on SPI_MASTER && GPIOLIB
help
SPI driver for Epson L4F00242T03. This provides basic support
for init and powering the LCD up/down through a sysfs interface.
config LCD_LMS283GF05
tristate "Samsung LMS283GF05 LCD"
- depends on SPI_MASTER && GENERIC_GPIO
+ depends on SPI_MASTER && GPIOLIB
help
SPI driver for Samsung LMS283GF05. This provides basic support
for powering the LCD up/down through a sysfs interface.
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 1b2c26d..21223d4 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -42,7 +42,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
-#include <video/of_display_timing.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
@@ -50,6 +49,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/fb.h>
#include <linux/regulator/consumer.h>
+#include <video/of_display_timing.h>
#include <video/videomode.h>
#define REG_SET 4
@@ -777,16 +777,16 @@ static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host)
struct videomode vm;
struct fb_videomode fb_vm;
- ret = videomode_from_timing(timings, &vm, i);
+ ret = videomode_from_timings(timings, &vm, i);
if (ret < 0)
goto put_timings_node;
ret = fb_videomode_from_videomode(&vm, &fb_vm);
if (ret < 0)
goto put_timings_node;
- if (vm.data_flags & DISPLAY_FLAGS_DE_HIGH)
+ if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
- if (vm.data_flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+ if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;
fb_add_videomode(&fb_vm, &fb_info->modelist);
}
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index e8ca63a..2bd1257 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -50,7 +50,7 @@ config W1_MASTER_DS1WM
config W1_MASTER_GPIO
tristate "GPIO 1-wire busmaster"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say Y here if you want to communicate with your 1-wire devices using
GPIO pins. This driver uses the GPIO API to control the wire.
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 8987990..d184c48 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -253,11 +253,9 @@ static int ath79_wdt_probe(struct platform_device *pdev)
return -EINVAL;
}
- wdt_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!wdt_base) {
- dev_err(&pdev->dev, "unable to remap memory region\n");
- return -ENOMEM;
- }
+ wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wdt_base))
+ return PTR_ERR(wdt_base);
wdt_clk = devm_clk_get(&pdev->dev, "wdt");
if (IS_ERR(wdt_clk))
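
The watchdog conversions in this and the following hunks all move from
devm_request_and_ioremap() to devm_ioremap_resource(), which prints its own
diagnostics and returns an ERR_PTR() on failure, so probe only has to
propagate PTR_ERR(). Sketched in isolation (driver names are hypothetical):

	#include <linux/platform_device.h>
	#include <linux/io.h>
	#include <linux/err.h>

	static int example_wdt_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* ... program the watchdog through 'base' ... */
		return 0;
	}
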
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 7df1fdc..100d4fb 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -27,6 +27,7 @@
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/err.h>
#define MODULE_NAME "DAVINCI-WDT: "
@@ -221,11 +222,9 @@ static int davinci_wdt_probe(struct platform_device *pdev)
return -ENOENT;
}
- wdt_base = devm_request_and_ioremap(dev, wdt_mem);
- if (!wdt_base) {
- dev_err(dev, "ioremap failed\n");
- return -EADDRNOTAVAIL;
- }
+ wdt_base = devm_ioremap_resource(dev, wdt_mem);
+ if (IS_ERR(wdt_base))
+ return PTR_ERR(wdt_base);
ret = misc_register(&davinci_wdt_miscdev);
if (ret < 0) {
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index c1a221c..ee03135 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -330,10 +330,9 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
}
/* get the memory region for the watchdog timer */
- wdt_base = devm_request_and_ioremap(dev, wdt_mem);
- if (wdt_base == NULL) {
- dev_err(dev, "failed to devm_request_and_ioremap() region\n");
- ret = -ENOMEM;
+ wdt_base = devm_ioremap_resource(dev, wdt_mem);
+ if (IS_ERR(wdt_base)) {
+ ret = PTR_ERR(wdt_base);
goto err;
}
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 6a89e40..6185af2 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <asm/watchdog.h>
#define DRV_NAME "sh-wdt"
@@ -249,9 +250,9 @@ static int sh_wdt_probe(struct platform_device *pdev)
wdt->clk = NULL;
}
- wdt->base = devm_request_and_ioremap(wdt->dev, res);
- if (unlikely(!wdt->base)) {
- rc = -EADDRNOTAVAIL;
+ wdt->base = devm_ioremap_resource(wdt->dev, res);
+ if (IS_ERR(wdt->base)) {
+ rc = PTR_ERR(wdt->base);
goto err;
}
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 08b48bb..faf4e18 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -523,6 +523,7 @@ int watchdog_dev_register(struct watchdog_device *watchdog)
int err, devno;
if (watchdog->id == 0) {
+ old_wdd = watchdog;
watchdog_miscdev.parent = watchdog->parent;
err = misc_register(&watchdog_miscdev);
if (err != 0) {
@@ -531,9 +532,9 @@ int watchdog_dev_register(struct watchdog_device *watchdog)
if (err == -EBUSY)
pr_err("%s: a legacy watchdog module is probably present.\n",
watchdog->info->identity);
+ old_wdd = NULL;
return err;
}
- old_wdd = watchdog;
}
/* Fill in the data structures */