author     Linus Torvalds <torvalds@linux-foundation.org>    2012-03-22 01:55:10 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-03-22 01:55:10 (GMT)
commit     5375871d432ae9fc581014ac117b96aaee3cd0c7 (patch)
tree       be98e8255b0f927fb920fb532a598b93fa140dbe /drivers
parent     b57cb7231b2ce52d3dda14a7b417ae125fb2eb97 (diff)
parent     dfbc2d75c1bd47c3186fa91f1655ea2f3825b0ec (diff)
download   linux-fsl-qoriq-5375871d432ae9fc581014ac117b96aaee3cd0c7.tar.xz
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc merge from Benjamin Herrenschmidt:
 "Here's the powerpc batch for this merge window. It is going to be a
  bit more nasty than usual as in touching things outside of
  arch/powerpc mostly due to the big iSeriesectomy :-) We finally got
  rid of the bugger (legacy iSeries support) which was a PITA to
  maintain and that nobody really used anymore.

  Here are some of the highlights:

   - Legacy iSeries is gone. Thanks Stephen ! There's still some bits
     and pieces remaining if you do a grep -ir series arch/powerpc but
     they are harmless and will be removed in the next few weeks
     hopefully.

   - The 'fadump' functionality (Firmware Assisted Dump) replaces the
     previous (equivalent) "pHyp assisted dump"... it's a rewrite of a
     mechanism to get the hypervisor to do crash dumps on pSeries, the
     new implementation hopefully being much more reliable. Thanks
     Mahesh Salgaonkar.

   - The "EEH" code (pSeries PCI error handling & recovery) got a big
     spring cleaning, motivated by the need to be able to implement a
     new backend for it on top of some new different type of firmware.
     The work isn't complete yet, but a good chunk of the cleanups is
     there.

     Note that this adds a field to struct device_node which is not
     very nice and which Grant objects to. I will have a patch soon
     that moves that to a powerpc private data structure (hopefully
     before rc1) and we'll improve things further later on (hopefully
     getting rid of the need for that pointer completely). Thanks
     Gavin Shan.

   - I dug into our exception & interrupt handling code to improve
     the way we do lazy interrupt handling (and make it work properly
     with "edge" triggered interrupt sources), and while at it found &
     fixed a wagon of issues in those areas, including adding support
     for page fault retry & fatal signals on page faults.

   - Your usual random batch of small fixes & updates, including a
     bunch of new embedded boards, both Freescale and APM based ones,
     etc..."

I fixed up some conflicts with the generalized irq-domain changes from
Grant Likely, hopefully correctly.

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (141 commits)
  powerpc/ps3: Do not adjust the wrapper load address
  powerpc: Remove the rest of the legacy iSeries include files
  powerpc: Remove the remaining CONFIG_PPC_ISERIES pieces
  init: Remove CONFIG_PPC_ISERIES
  powerpc: Remove FW_FEATURE ISERIES from arch code
  tty/hvc_vio: FW_FEATURE_ISERIES is no longer selectable
  powerpc/spufs: Fix double unlocks
  powerpc/5200: convert mpc5200 to use of_platform_populate()
  powerpc/mpc5200: add options to mpc5200_defconfig
  powerpc/mpc52xx: add a4m072 board support
  powerpc/mpc5200: update mpc5200_defconfig to fit for charon board
  Documentation/powerpc/mpc52xx.txt: Checkpatch cleanup
  powerpc/44x: Add additional device support for APM821xx SoC and Bluestone board
  powerpc/44x: Add support PCI-E for APM821xx SoC and Bluestone board
  MAINTAINERS: Update PowerPC 4xx tree
  powerpc/44x: The bug fixed support for APM821xx SoC and Bluestone board
  powerpc: document the FSL MPIC message register binding
  powerpc: add support for MPIC message register API
  powerpc/fsl: Added aliased MSIIR register address to MSI node in dts
  powerpc/85xx: mpc8548cds - add 36-bit dts
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/driver.c                  |   30
-rw-r--r--  drivers/block/viodasd.c                |  809
-rw-r--r--  drivers/cdrom/viocd.c                  |  739
-rw-r--r--  drivers/char/viotape.c                 | 1041
-rw-r--r--  drivers/gpio/Kconfig                   |   11
-rw-r--r--  drivers/gpio/Makefile                  |    1
-rw-r--r--  drivers/gpio/gpio-ge.c                 |  199
-rw-r--r--  drivers/misc/carma/carma-fpga.c        |  114
-rw-r--r--  drivers/mtd/nand/Kconfig               |   10
-rw-r--r--  drivers/mtd/nand/Makefile              |    1
-rw-r--r--  drivers/mtd/nand/fsl_ifc_nand.c        | 1072
-rw-r--r--  drivers/scsi/Kconfig                   |    3
-rw-r--r--  drivers/scsi/ibmvscsi/Makefile         |    1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c       |   12
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h       |    1
-rw-r--r--  drivers/scsi/ibmvscsi/iseries_vscsi.c  |  173
-rw-r--r--  drivers/tty/hvc/Kconfig                |   14
-rw-r--r--  drivers/tty/hvc/Makefile               |    1
-rw-r--r--  drivers/tty/hvc/hvc_iseries.c          |  599
-rw-r--r--  drivers/tty/hvc/hvc_udbg.c             |    8
-rw-r--r--  drivers/tty/hvc/hvc_vio.c              |    4
-rw-r--r--  drivers/tty/serial/Kconfig             |    2
-rw-r--r--  drivers/watchdog/Kconfig               |    2
23 files changed, 1382 insertions, 3465 deletions
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 60e4f77..3ec3896 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -123,36 +123,6 @@ void driver_remove_file(struct device_driver *drv,
}
EXPORT_SYMBOL_GPL(driver_remove_file);
-/**
- * driver_add_kobj - add a kobject below the specified driver
- * @drv: requesting device driver
- * @kobj: kobject to add below this driver
- * @fmt: format string that names the kobject
- *
- * You really don't want to do this, this is only here due to one looney
- * iseries driver, go poke those developers if you are annoyed about
- * this...
- */
-int driver_add_kobj(struct device_driver *drv, struct kobject *kobj,
- const char *fmt, ...)
-{
- va_list args;
- char *name;
- int ret;
-
- va_start(args, fmt);
- name = kvasprintf(GFP_KERNEL, fmt, args);
- va_end(args);
-
- if (!name)
- return -ENOMEM;
-
- ret = kobject_add(kobj, &drv->p->kobj, "%s", name);
- kfree(name);
- return ret;
-}
-EXPORT_SYMBOL_GPL(driver_add_kobj);
-
static int driver_add_groups(struct device_driver *drv,
const struct attribute_group **groups)
{
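
For context on what this hunk removes (not part of the patch itself): driver_add_kobj() let a driver hang an extra, named kobject under its own sysfs directory, and the kerneldoc being deleted above says it existed only for a single legacy iSeries driver. A minimal sketch of such a caller is shown below; the function, ktype and "port%d" name are hypothetical, but driver_add_kobj(), kobject_init() and kobject_put() are used with the signatures the kernel had before this removal.

#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/slab.h>

static void example_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static struct kobj_type example_ktype = {
	.release = example_kobj_release,
};

/* Hypothetical caller: creates /sys/bus/<bus>/drivers/<drv>/port0 */
static int example_add_port_kobj(struct device_driver *drv)
{
	struct kobject *kobj;
	int ret;

	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	if (!kobj)
		return -ENOMEM;

	kobject_init(kobj, &example_ktype);
	ret = driver_add_kobj(drv, kobj, "port%d", 0);
	if (ret)
		kobject_put(kobj);	/* drops the ref and frees via release() */
	return ret;
}
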
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
deleted file mode 100644
index 9a5b2a2..0000000
--- a/drivers/block/viodasd.c
+++ /dev/null
@@ -1,809 +0,0 @@
-/* -*- linux-c -*-
- * viodasd.c
- * Authors: Dave Boutcher <boutcher@us.ibm.com>
- * Ryan Arnold <ryanarn@us.ibm.com>
- * Colin Devilbiss <devilbis@us.ibm.com>
- * Stephen Rothwell
- *
- * (C) Copyright 2000-2004 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This routine provides access to disk space (termed "DASD" in historical
- * IBM terms) owned and managed by an OS/400 partition running on the
- * same box as this Linux partition.
- *
- * All disk operations are performed by sending messages back and forth to
- * the OS/400 partition.
- */
-
-#define pr_fmt(fmt) "viod: " fmt
-
-#include <linux/major.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/blkdev.h>
-#include <linux/genhd.h>
-#include <linux/hdreg.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/mutex.h>
-#include <linux/dma-mapping.h>
-#include <linux/completion.h>
-#include <linux/device.h>
-#include <linux/scatterlist.h>
-
-#include <asm/uaccess.h>
-#include <asm/vio.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iseries/hv_lp_config.h>
-#include <asm/iseries/vio.h>
-#include <asm/firmware.h>
-
-MODULE_DESCRIPTION("iSeries Virtual DASD");
-MODULE_AUTHOR("Dave Boutcher");
-MODULE_LICENSE("GPL");
-
-/*
- * We only support 7 partitions per physical disk....so with minor
- * numbers 0-255 we get a maximum of 32 disks.
- */
-#define VIOD_GENHD_NAME "iseries/vd"
-
-#define VIOD_VERS "1.64"
-
-enum {
- PARTITION_SHIFT = 3,
- MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS,
- MAX_DISK_NAME = FIELD_SIZEOF(struct gendisk, disk_name)
-};
-
-static DEFINE_MUTEX(viodasd_mutex);
-static DEFINE_SPINLOCK(viodasd_spinlock);
-
-#define VIOMAXREQ 16
-
-#define DEVICE_NO(cell) ((struct viodasd_device *)(cell) - &viodasd_devices[0])
-
-struct viodasd_waitevent {
- struct completion com;
- int rc;
- u16 sub_result;
- int max_disk; /* open */
-};
-
-static const struct vio_error_entry viodasd_err_table[] = {
- { 0x0201, EINVAL, "Invalid Range" },
- { 0x0202, EINVAL, "Invalid Token" },
- { 0x0203, EIO, "DMA Error" },
- { 0x0204, EIO, "Use Error" },
- { 0x0205, EIO, "Release Error" },
- { 0x0206, EINVAL, "Invalid Disk" },
- { 0x0207, EBUSY, "Can't Lock" },
- { 0x0208, EIO, "Already Locked" },
- { 0x0209, EIO, "Already Unlocked" },
- { 0x020A, EIO, "Invalid Arg" },
- { 0x020B, EIO, "Bad IFS File" },
- { 0x020C, EROFS, "Read Only Device" },
- { 0x02FF, EIO, "Internal Error" },
- { 0x0000, 0, NULL },
-};
-
-/*
- * Figure out the biggest I/O request (in sectors) we can accept
- */
-#define VIODASD_MAXSECTORS (4096 / 512 * VIOMAXBLOCKDMA)
-
-/*
- * Number of disk I/O requests we've sent to OS/400
- */
-static int num_req_outstanding;
-
-/*
- * This is our internal structure for keeping track of disk devices
- */
-struct viodasd_device {
- u16 cylinders;
- u16 tracks;
- u16 sectors;
- u16 bytes_per_sector;
- u64 size;
- int read_only;
- spinlock_t q_lock;
- struct gendisk *disk;
- struct device *dev;
-} viodasd_devices[MAX_DISKNO];
-
-/*
- * External open entry point.
- */
-static int viodasd_open(struct block_device *bdev, fmode_t mode)
-{
- struct viodasd_device *d = bdev->bd_disk->private_data;
- HvLpEvent_Rc hvrc;
- struct viodasd_waitevent we;
- u16 flags = 0;
-
- if (d->read_only) {
- if (mode & FMODE_WRITE)
- return -EROFS;
- flags = vioblockflags_ro;
- }
-
- init_completion(&we.com);
-
- /* Send the open event to OS/400 */
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_blockio | vioblockopen,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)&we, VIOVERSION << 16,
- ((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32),
- 0, 0, 0);
- if (hvrc != 0) {
- pr_warning("HV open failed %d\n", (int)hvrc);
- return -EIO;
- }
-
- wait_for_completion(&we.com);
-
- /* Check the return code */
- if (we.rc != 0) {
- const struct vio_error_entry *err =
- vio_lookup_rc(viodasd_err_table, we.sub_result);
-
- pr_warning("bad rc opening disk: %d:0x%04x (%s)\n",
- (int)we.rc, we.sub_result, err->msg);
- return -EIO;
- }
-
- return 0;
-}
-
-static int viodasd_unlocked_open(struct block_device *bdev, fmode_t mode)
-{
- int ret;
-
- mutex_lock(&viodasd_mutex);
- ret = viodasd_open(bdev, mode);
- mutex_unlock(&viodasd_mutex);
-
- return ret;
-}
-
-
-/*
- * External release entry point.
- */
-static int viodasd_release(struct gendisk *disk, fmode_t mode)
-{
- struct viodasd_device *d = disk->private_data;
- HvLpEvent_Rc hvrc;
-
- mutex_lock(&viodasd_mutex);
- /* Send the event to OS/400. We DON'T expect a response */
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_blockio | vioblockclose,
- HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- 0, VIOVERSION << 16,
- ((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */,
- 0, 0, 0);
- if (hvrc != 0)
- pr_warning("HV close call failed %d\n", (int)hvrc);
-
- mutex_unlock(&viodasd_mutex);
-
- return 0;
-}
-
-
-/* External ioctl entry point.
- */
-static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct gendisk *disk = bdev->bd_disk;
- struct viodasd_device *d = disk->private_data;
-
- geo->sectors = d->sectors ? d->sectors : 32;
- geo->heads = d->tracks ? d->tracks : 64;
- geo->cylinders = d->cylinders ? d->cylinders :
- get_capacity(disk) / (geo->sectors * geo->heads);
-
- return 0;
-}
-
-/*
- * Our file operations table
- */
-static const struct block_device_operations viodasd_fops = {
- .owner = THIS_MODULE,
- .open = viodasd_unlocked_open,
- .release = viodasd_release,
- .getgeo = viodasd_getgeo,
-};
-
-/*
- * End a request
- */
-static void viodasd_end_request(struct request *req, int error,
- int num_sectors)
-{
- __blk_end_request(req, error, num_sectors << 9);
-}
-
-/*
- * Send an actual I/O request to OS/400
- */
-static int send_request(struct request *req)
-{
- u64 start;
- int direction;
- int nsg;
- u16 viocmd;
- HvLpEvent_Rc hvrc;
- struct vioblocklpevent *bevent;
- struct HvLpEvent *hev;
- struct scatterlist sg[VIOMAXBLOCKDMA];
- int sgindex;
- struct viodasd_device *d;
- unsigned long flags;
-
- start = (u64)blk_rq_pos(req) << 9;
-
- if (rq_data_dir(req) == READ) {
- direction = DMA_FROM_DEVICE;
- viocmd = viomajorsubtype_blockio | vioblockread;
- } else {
- direction = DMA_TO_DEVICE;
- viocmd = viomajorsubtype_blockio | vioblockwrite;
- }
-
- d = req->rq_disk->private_data;
-
- /* Now build the scatter-gather list */
- sg_init_table(sg, VIOMAXBLOCKDMA);
- nsg = blk_rq_map_sg(req->q, req, sg);
- nsg = dma_map_sg(d->dev, sg, nsg, direction);
-
- spin_lock_irqsave(&viodasd_spinlock, flags);
- num_req_outstanding++;
-
- /* This optimization handles a single DMA block */
- if (nsg == 1)
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo, viocmd,
- HvLpEvent_AckInd_DoAck,
- HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)req, VIOVERSION << 16,
- ((u64)DEVICE_NO(d) << 48), start,
- ((u64)sg_dma_address(&sg[0])) << 32,
- sg_dma_len(&sg[0]));
- else {
- bevent = (struct vioblocklpevent *)
- vio_get_event_buffer(viomajorsubtype_blockio);
- if (bevent == NULL) {
- pr_warning("error allocating disk event buffer\n");
- goto error_ret;
- }
-
- /*
- * Now build up the actual request. Note that we store
- * the pointer to the request in the correlation
- * token so we can match the response up later
- */
- memset(bevent, 0, sizeof(struct vioblocklpevent));
- hev = &bevent->event;
- hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK |
- HV_LP_EVENT_INT;
- hev->xType = HvLpEvent_Type_VirtualIo;
- hev->xSubtype = viocmd;
- hev->xSourceLp = HvLpConfig_getLpIndex();
- hev->xTargetLp = viopath_hostLp;
- hev->xSizeMinus1 =
- offsetof(struct vioblocklpevent, u.rw_data.dma_info) +
- (sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1;
- hev->xSourceInstanceId = viopath_sourceinst(viopath_hostLp);
- hev->xTargetInstanceId = viopath_targetinst(viopath_hostLp);
- hev->xCorrelationToken = (u64)req;
- bevent->version = VIOVERSION;
- bevent->disk = DEVICE_NO(d);
- bevent->u.rw_data.offset = start;
-
- /*
- * Copy just the dma information from the sg list
- * into the request
- */
- for (sgindex = 0; sgindex < nsg; sgindex++) {
- bevent->u.rw_data.dma_info[sgindex].token =
- sg_dma_address(&sg[sgindex]);
- bevent->u.rw_data.dma_info[sgindex].len =
- sg_dma_len(&sg[sgindex]);
- }
-
- /* Send the request */
- hvrc = HvCallEvent_signalLpEvent(&bevent->event);
- vio_free_event_buffer(viomajorsubtype_blockio, bevent);
- }
-
- if (hvrc != HvLpEvent_Rc_Good) {
- pr_warning("error sending disk event to OS/400 (rc %d)\n",
- (int)hvrc);
- goto error_ret;
- }
- spin_unlock_irqrestore(&viodasd_spinlock, flags);
- return 0;
-
-error_ret:
- num_req_outstanding--;
- spin_unlock_irqrestore(&viodasd_spinlock, flags);
- dma_unmap_sg(d->dev, sg, nsg, direction);
- return -1;
-}
-
-/*
- * This is the external request processing routine
- */
-static void do_viodasd_request(struct request_queue *q)
-{
- struct request *req;
-
- /*
- * If we already have the maximum number of requests
- * outstanding to OS/400 just bail out. We'll come
- * back later.
- */
- while (num_req_outstanding < VIOMAXREQ) {
- req = blk_fetch_request(q);
- if (req == NULL)
- return;
- /* check that request contains a valid command */
- if (req->cmd_type != REQ_TYPE_FS) {
- viodasd_end_request(req, -EIO, blk_rq_sectors(req));
- continue;
- }
- /* Try sending the request */
- if (send_request(req) != 0)
- viodasd_end_request(req, -EIO, blk_rq_sectors(req));
- }
-}
-
-/*
- * Probe a single disk and fill in the viodasd_device structure
- * for it.
- */
-static int probe_disk(struct viodasd_device *d)
-{
- HvLpEvent_Rc hvrc;
- struct viodasd_waitevent we;
- int dev_no = DEVICE_NO(d);
- struct gendisk *g;
- struct request_queue *q;
- u16 flags = 0;
-
-retry:
- init_completion(&we.com);
-
- /* Send the open event to OS/400 */
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_blockio | vioblockopen,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)&we, VIOVERSION << 16,
- ((u64)dev_no << 48) | ((u64)flags<< 32),
- 0, 0, 0);
- if (hvrc != 0) {
- pr_warning("bad rc on HV open %d\n", (int)hvrc);
- return 0;
- }
-
- wait_for_completion(&we.com);
-
- if (we.rc != 0) {
- if (flags != 0)
- return 0;
- /* try again with read only flag set */
- flags = vioblockflags_ro;
- goto retry;
- }
- if (we.max_disk > (MAX_DISKNO - 1)) {
- printk_once(KERN_INFO pr_fmt("Only examining the first %d of %d disks connected\n"),
- MAX_DISKNO, we.max_disk + 1);
- }
-
- /* Send the close event to OS/400. We DON'T expect a response */
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_blockio | vioblockclose,
- HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- 0, VIOVERSION << 16,
- ((u64)dev_no << 48) | ((u64)flags << 32),
- 0, 0, 0);
- if (hvrc != 0) {
- pr_warning("bad rc sending event to OS/400 %d\n", (int)hvrc);
- return 0;
- }
-
- if (d->dev == NULL) {
- /* this is when we reprobe for new disks */
- if (vio_create_viodasd(dev_no) == NULL) {
- pr_warning("cannot allocate virtual device for disk %d\n",
- dev_no);
- return 0;
- }
- /*
- * The vio_create_viodasd will have recursed into this
- * routine with d->dev set to the new vio device and
- * will finish the setup of the disk below.
- */
- return 1;
- }
-
- /* create the request queue for the disk */
- spin_lock_init(&d->q_lock);
- q = blk_init_queue(do_viodasd_request, &d->q_lock);
- if (q == NULL) {
- pr_warning("cannot allocate queue for disk %d\n", dev_no);
- return 0;
- }
- g = alloc_disk(1 << PARTITION_SHIFT);
- if (g == NULL) {
- pr_warning("cannot allocate disk structure for disk %d\n",
- dev_no);
- blk_cleanup_queue(q);
- return 0;
- }
-
- d->disk = g;
- blk_queue_max_segments(q, VIOMAXBLOCKDMA);
- blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
- g->major = VIODASD_MAJOR;
- g->first_minor = dev_no << PARTITION_SHIFT;
- if (dev_no >= 26)
- snprintf(g->disk_name, sizeof(g->disk_name),
- VIOD_GENHD_NAME "%c%c",
- 'a' + (dev_no / 26) - 1, 'a' + (dev_no % 26));
- else
- snprintf(g->disk_name, sizeof(g->disk_name),
- VIOD_GENHD_NAME "%c", 'a' + (dev_no % 26));
- g->fops = &viodasd_fops;
- g->queue = q;
- g->private_data = d;
- g->driverfs_dev = d->dev;
- set_capacity(g, d->size >> 9);
-
- pr_info("disk %d: %lu sectors (%lu MB) CHS=%d/%d/%d sector size %d%s\n",
- dev_no, (unsigned long)(d->size >> 9),
- (unsigned long)(d->size >> 20),
- (int)d->cylinders, (int)d->tracks,
- (int)d->sectors, (int)d->bytes_per_sector,
- d->read_only ? " (RO)" : "");
-
- /* register us in the global list */
- add_disk(g);
- return 1;
-}
-
-/* returns the total number of scatterlist elements converted */
-static int block_event_to_scatterlist(const struct vioblocklpevent *bevent,
- struct scatterlist *sg, int *total_len)
-{
- int i, numsg;
- const struct rw_data *rw_data = &bevent->u.rw_data;
- static const int offset =
- offsetof(struct vioblocklpevent, u.rw_data.dma_info);
- static const int element_size = sizeof(rw_data->dma_info[0]);
-
- numsg = ((bevent->event.xSizeMinus1 + 1) - offset) / element_size;
- if (numsg > VIOMAXBLOCKDMA)
- numsg = VIOMAXBLOCKDMA;
-
- *total_len = 0;
- sg_init_table(sg, VIOMAXBLOCKDMA);
- for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) {
- sg_dma_address(&sg[i]) = rw_data->dma_info[i].token;
- sg_dma_len(&sg[i]) = rw_data->dma_info[i].len;
- *total_len += rw_data->dma_info[i].len;
- }
- return i;
-}
-
-/*
- * Restart all queues, starting with the one _after_ the disk given,
- * thus reducing the chance of starvation of higher numbered disks.
- */
-static void viodasd_restart_all_queues_starting_from(int first_index)
-{
- int i;
-
- for (i = first_index + 1; i < MAX_DISKNO; ++i)
- if (viodasd_devices[i].disk)
- blk_run_queue(viodasd_devices[i].disk->queue);
- for (i = 0; i <= first_index; ++i)
- if (viodasd_devices[i].disk)
- blk_run_queue(viodasd_devices[i].disk->queue);
-}
-
-/*
- * For read and write requests, decrement the number of outstanding requests,
- * Free the DMA buffers we allocated.
- */
-static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
-{
- int num_sg, num_sect, pci_direction, total_len;
- struct request *req;
- struct scatterlist sg[VIOMAXBLOCKDMA];
- struct HvLpEvent *event = &bevent->event;
- unsigned long irq_flags;
- struct viodasd_device *d;
- int error;
- spinlock_t *qlock;
-
- num_sg = block_event_to_scatterlist(bevent, sg, &total_len);
- num_sect = total_len >> 9;
- if (event->xSubtype == (viomajorsubtype_blockio | vioblockread))
- pci_direction = DMA_FROM_DEVICE;
- else
- pci_direction = DMA_TO_DEVICE;
- req = (struct request *)bevent->event.xCorrelationToken;
- d = req->rq_disk->private_data;
-
- dma_unmap_sg(d->dev, sg, num_sg, pci_direction);
-
- /*
- * Since this is running in interrupt mode, we need to make sure
- * we're not stepping on any global I/O operations
- */
- spin_lock_irqsave(&viodasd_spinlock, irq_flags);
- num_req_outstanding--;
- spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);
-
- error = (event->xRc == HvLpEvent_Rc_Good) ? 0 : -EIO;
- if (error) {
- const struct vio_error_entry *err;
- err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
- pr_warning("read/write error %d:0x%04x (%s)\n",
- event->xRc, bevent->sub_result, err->msg);
- num_sect = blk_rq_sectors(req);
- }
- qlock = req->q->queue_lock;
- spin_lock_irqsave(qlock, irq_flags);
- viodasd_end_request(req, error, num_sect);
- spin_unlock_irqrestore(qlock, irq_flags);
-
- /* Finally, try to get more requests off of this device's queue */
- viodasd_restart_all_queues_starting_from(DEVICE_NO(d));
-
- return 0;
-}
-
-/* This routine handles incoming block LP events */
-static void handle_block_event(struct HvLpEvent *event)
-{
- struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
- struct viodasd_waitevent *pwe;
-
- if (event == NULL)
- /* Notification that a partition went away! */
- return;
- /* First, we should NEVER get an int here...only acks */
- if (hvlpevent_is_int(event)) {
- pr_warning("Yikes! got an int in viodasd event handler!\n");
- if (hvlpevent_need_ack(event)) {
- event->xRc = HvLpEvent_Rc_InvalidSubtype;
- HvCallEvent_ackLpEvent(event);
- }
- }
-
- switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
- case vioblockopen:
- /*
- * Handle a response to an open request. We get all the
- * disk information in the response, so update it. The
- * correlation token contains a pointer to a waitevent
- * structure that has a completion in it. update the
- * return code in the waitevent structure and post the
- * completion to wake up the guy who sent the request
- */
- pwe = (struct viodasd_waitevent *)event->xCorrelationToken;
- pwe->rc = event->xRc;
- pwe->sub_result = bevent->sub_result;
- if (event->xRc == HvLpEvent_Rc_Good) {
- const struct open_data *data = &bevent->u.open_data;
- struct viodasd_device *device =
- &viodasd_devices[bevent->disk];
- device->read_only =
- bevent->flags & vioblockflags_ro;
- device->size = data->disk_size;
- device->cylinders = data->cylinders;
- device->tracks = data->tracks;
- device->sectors = data->sectors;
- device->bytes_per_sector = data->bytes_per_sector;
- pwe->max_disk = data->max_disk;
- }
- complete(&pwe->com);
- break;
- case vioblockclose:
- break;
- case vioblockread:
- case vioblockwrite:
- viodasd_handle_read_write(bevent);
- break;
-
- default:
- pr_warning("invalid subtype!");
- if (hvlpevent_need_ack(event)) {
- event->xRc = HvLpEvent_Rc_InvalidSubtype;
- HvCallEvent_ackLpEvent(event);
- }
- }
-}
-
-/*
- * Get the driver to reprobe for more disks.
- */
-static ssize_t probe_disks(struct device_driver *drv, const char *buf,
- size_t count)
-{
- struct viodasd_device *d;
-
- for (d = viodasd_devices; d < &viodasd_devices[MAX_DISKNO]; d++) {
- if (d->disk == NULL)
- probe_disk(d);
- }
- return count;
-}
-static DRIVER_ATTR(probe, S_IWUSR, NULL, probe_disks);
-
-static int viodasd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
-{
- struct viodasd_device *d = &viodasd_devices[vdev->unit_address];
-
- d->dev = &vdev->dev;
- if (!probe_disk(d))
- return -ENODEV;
- return 0;
-}
-
-static int viodasd_remove(struct vio_dev *vdev)
-{
- struct viodasd_device *d;
-
- d = &viodasd_devices[vdev->unit_address];
- if (d->disk) {
- del_gendisk(d->disk);
- blk_cleanup_queue(d->disk->queue);
- put_disk(d->disk);
- d->disk = NULL;
- }
- d->dev = NULL;
- return 0;
-}
-
-/**
- * viodasd_device_table: Used by vio.c to match devices that we
- * support.
- */
-static struct vio_device_id viodasd_device_table[] __devinitdata = {
- { "block", "IBM,iSeries-viodasd" },
- { "", "" }
-};
-MODULE_DEVICE_TABLE(vio, viodasd_device_table);
-
-static struct vio_driver viodasd_driver = {
- .id_table = viodasd_device_table,
- .probe = viodasd_probe,
- .remove = viodasd_remove,
- .driver = {
- .name = "viodasd",
- .owner = THIS_MODULE,
- }
-};
-
-static int need_delete_probe;
-
-/*
- * Initialize the whole device driver. Handle module and non-module
- * versions
- */
-static int __init viodasd_init(void)
-{
- int rc;
-
- if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
- rc = -ENODEV;
- goto early_fail;
- }
-
- /* Try to open to our host lp */
- if (viopath_hostLp == HvLpIndexInvalid)
- vio_set_hostlp();
-
- if (viopath_hostLp == HvLpIndexInvalid) {
- pr_warning("invalid hosting partition\n");
- rc = -EIO;
- goto early_fail;
- }
-
- pr_info("vers " VIOD_VERS ", hosting partition %d\n", viopath_hostLp);
-
- /* register the block device */
- rc = register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
- if (rc) {
- pr_warning("Unable to get major number %d for %s\n",
- VIODASD_MAJOR, VIOD_GENHD_NAME);
- goto early_fail;
- }
- /* Actually open the path to the hosting partition */
- rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio,
- VIOMAXREQ + 2);
- if (rc) {
- pr_warning("error opening path to host partition %d\n",
- viopath_hostLp);
- goto unregister_blk;
- }
-
- /* Initialize our request handler */
- vio_setHandler(viomajorsubtype_blockio, handle_block_event);
-
- rc = vio_register_driver(&viodasd_driver);
- if (rc) {
- pr_warning("vio_register_driver failed\n");
- goto unset_handler;
- }
-
- /*
- * If this call fails, it just means that we cannot dynamically
- * add virtual disks, but the driver will still work fine for
- * all existing disk, so ignore the failure.
- */
- if (!driver_create_file(&viodasd_driver.driver, &driver_attr_probe))
- need_delete_probe = 1;
-
- return 0;
-
-unset_handler:
- vio_clearHandler(viomajorsubtype_blockio);
- viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
-unregister_blk:
- unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
-early_fail:
- return rc;
-}
-module_init(viodasd_init);
-
-void __exit viodasd_exit(void)
-{
- if (need_delete_probe)
- driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
- vio_unregister_driver(&viodasd_driver);
- vio_clearHandler(viomajorsubtype_blockio);
- viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
- unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
-}
-module_exit(viodasd_exit);
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
deleted file mode 100644
index 7878da8..0000000
--- a/drivers/cdrom/viocd.c
+++ /dev/null
@@ -1,739 +0,0 @@
-/* -*- linux-c -*-
- * drivers/cdrom/viocd.c
- *
- * iSeries Virtual CD Rom
- *
- * Authors: Dave Boutcher <boutcher@us.ibm.com>
- * Ryan Arnold <ryanarn@us.ibm.com>
- * Colin Devilbiss <devilbis@us.ibm.com>
- * Stephen Rothwell
- *
- * (C) Copyright 2000-2004 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) anyu later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This routine provides access to CD ROM drives owned and managed by an
- * OS/400 partition running on the same box as this Linux partition.
- *
- * All operations are performed by sending messages back and forth to
- * the OS/400 partition.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/major.h>
-#include <linux/blkdev.h>
-#include <linux/cdrom.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <linux/completion.h>
-#include <linux/proc_fs.h>
-#include <linux/mutex.h>
-#include <linux/seq_file.h>
-#include <linux/scatterlist.h>
-
-#include <asm/vio.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iseries/vio.h>
-#include <asm/firmware.h>
-
-#define VIOCD_DEVICE "iseries/vcd"
-
-#define VIOCD_VERS "1.06"
-
-/*
- * Should probably make this a module parameter....sigh
- */
-#define VIOCD_MAX_CD HVMAXARCHITECTEDVIRTUALCDROMS
-
-static DEFINE_MUTEX(viocd_mutex);
-static const struct vio_error_entry viocd_err_table[] = {
- {0x0201, EINVAL, "Invalid Range"},
- {0x0202, EINVAL, "Invalid Token"},
- {0x0203, EIO, "DMA Error"},
- {0x0204, EIO, "Use Error"},
- {0x0205, EIO, "Release Error"},
- {0x0206, EINVAL, "Invalid CD"},
- {0x020C, EROFS, "Read Only Device"},
- {0x020D, ENOMEDIUM, "Changed or Missing Volume (or Varied Off?)"},
- {0x020E, EIO, "Optical System Error (Varied Off?)"},
- {0x02FF, EIO, "Internal Error"},
- {0x3010, EIO, "Changed Volume"},
- {0xC100, EIO, "Optical System Error"},
- {0x0000, 0, NULL},
-};
-
-/*
- * This is the structure we use to exchange info between driver and interrupt
- * handler
- */
-struct viocd_waitevent {
- struct completion com;
- int rc;
- u16 sub_result;
- int changed;
-};
-
-/* this is a lookup table for the true capabilities of a device */
-struct capability_entry {
- char *type;
- int capability;
-};
-
-static struct capability_entry capability_table[] __initdata = {
- { "6330", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
- { "6331", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
- { "6333", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
- { "632A", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
- { "6321", CDC_LOCK },
- { "632B", 0 },
- { NULL , CDC_LOCK },
-};
-
-/* These are our internal structures for keeping track of devices */
-static int viocd_numdev;
-
-struct disk_info {
- struct gendisk *viocd_disk;
- struct cdrom_device_info viocd_info;
- struct device *dev;
- const char *rsrcname;
- const char *type;
- const char *model;
-};
-static struct disk_info viocd_diskinfo[VIOCD_MAX_CD];
-
-#define DEVICE_NR(di) ((di) - &viocd_diskinfo[0])
-
-static spinlock_t viocd_reqlock;
-
-#define MAX_CD_REQ 1
-
-/* procfs support */
-static int proc_viocd_show(struct seq_file *m, void *v)
-{
- int i;
-
- for (i = 0; i < viocd_numdev; i++) {
- seq_printf(m, "viocd device %d is iSeries resource %10.10s"
- "type %4.4s, model %3.3s\n",
- i, viocd_diskinfo[i].rsrcname,
- viocd_diskinfo[i].type,
- viocd_diskinfo[i].model);
- }
- return 0;
-}
-
-static int proc_viocd_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_viocd_show, NULL);
-}
-
-static const struct file_operations proc_viocd_operations = {
- .owner = THIS_MODULE,
- .open = proc_viocd_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int viocd_blk_open(struct block_device *bdev, fmode_t mode)
-{
- struct disk_info *di = bdev->bd_disk->private_data;
- int ret;
-
- mutex_lock(&viocd_mutex);
- ret = cdrom_open(&di->viocd_info, bdev, mode);
- mutex_unlock(&viocd_mutex);
-
- return ret;
-}
-
-static int viocd_blk_release(struct gendisk *disk, fmode_t mode)
-{
- struct disk_info *di = disk->private_data;
- mutex_lock(&viocd_mutex);
- cdrom_release(&di->viocd_info, mode);
- mutex_unlock(&viocd_mutex);
- return 0;
-}
-
-static int viocd_blk_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg)
-{
- struct disk_info *di = bdev->bd_disk->private_data;
- int ret;
-
- mutex_lock(&viocd_mutex);
- ret = cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg);
- mutex_unlock(&viocd_mutex);
-
- return ret;
-}
-
-static unsigned int viocd_blk_check_events(struct gendisk *disk,
- unsigned int clearing)
-{
- struct disk_info *di = disk->private_data;
- return cdrom_check_events(&di->viocd_info, clearing);
-}
-
-static const struct block_device_operations viocd_fops = {
- .owner = THIS_MODULE,
- .open = viocd_blk_open,
- .release = viocd_blk_release,
- .ioctl = viocd_blk_ioctl,
- .check_events = viocd_blk_check_events,
-};
-
-static int viocd_open(struct cdrom_device_info *cdi, int purpose)
-{
- struct disk_info *diskinfo = cdi->handle;
- int device_no = DEVICE_NR(diskinfo);
- HvLpEvent_Rc hvrc;
- struct viocd_waitevent we;
-
- init_completion(&we.com);
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_cdio | viocdopen,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
- 0, 0, 0);
- if (hvrc != 0) {
- pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
- (int)hvrc);
- return -EIO;
- }
-
- wait_for_completion(&we.com);
-
- if (we.rc) {
- const struct vio_error_entry *err =
- vio_lookup_rc(viocd_err_table, we.sub_result);
- pr_warning("bad rc %d:0x%04X on open: %s\n",
- we.rc, we.sub_result, err->msg);
- return -err->errno;
- }
-
- return 0;
-}
-
-static void viocd_release(struct cdrom_device_info *cdi)
-{
- int device_no = DEVICE_NR((struct disk_info *)cdi->handle);
- HvLpEvent_Rc hvrc;
-
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_cdio | viocdclose,
- HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp), 0,
- VIOVERSION << 16, ((u64)device_no << 48), 0, 0, 0);
- if (hvrc != 0)
- pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
- (int)hvrc);
-}
-
-/* Send a read or write request to OS/400 */
-static int send_request(struct request *req)
-{
- HvLpEvent_Rc hvrc;
- struct disk_info *diskinfo = req->rq_disk->private_data;
- u64 len;
- dma_addr_t dmaaddr;
- int direction;
- u16 cmd;
- struct scatterlist sg;
-
- BUG_ON(req->nr_phys_segments > 1);
-
- if (rq_data_dir(req) == READ) {
- direction = DMA_FROM_DEVICE;
- cmd = viomajorsubtype_cdio | viocdread;
- } else {
- direction = DMA_TO_DEVICE;
- cmd = viomajorsubtype_cdio | viocdwrite;
- }
-
- sg_init_table(&sg, 1);
- if (blk_rq_map_sg(req->q, req, &sg) == 0) {
- pr_warning("error setting up scatter/gather list\n");
- return -1;
- }
-
- if (dma_map_sg(diskinfo->dev, &sg, 1, direction) == 0) {
- pr_warning("error allocating sg tce\n");
- return -1;
- }
- dmaaddr = sg_dma_address(&sg);
- len = sg_dma_len(&sg);
-
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo, cmd,
- HvLpEvent_AckInd_DoAck,
- HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)req, VIOVERSION << 16,
- ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
- (u64)blk_rq_pos(req) * 512, len, 0);
- if (hvrc != HvLpEvent_Rc_Good) {
- pr_warning("hv error on op %d\n", (int)hvrc);
- return -1;
- }
-
- return 0;
-}
-
-static int rwreq;
-
-static void do_viocd_request(struct request_queue *q)
-{
- struct request *req;
-
- while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
- if (req->cmd_type != REQ_TYPE_FS)
- __blk_end_request_all(req, -EIO);
- else if (send_request(req) < 0) {
- pr_warning("unable to send message to OS/400!\n");
- __blk_end_request_all(req, -EIO);
- } else
- rwreq++;
- }
-}
-
-static unsigned int viocd_check_events(struct cdrom_device_info *cdi,
- unsigned int clearing, int disc_nr)
-{
- struct viocd_waitevent we;
- HvLpEvent_Rc hvrc;
- int device_no = DEVICE_NR((struct disk_info *)cdi->handle);
-
- init_completion(&we.com);
-
- /* Send the open event to OS/400 */
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_cdio | viocdcheck,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
- 0, 0, 0);
- if (hvrc != 0) {
- pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
- (int)hvrc);
- return 0;
- }
-
- wait_for_completion(&we.com);
-
- /* Check the return code. If bad, assume no change */
- if (we.rc) {
- const struct vio_error_entry *err =
- vio_lookup_rc(viocd_err_table, we.sub_result);
- pr_warning("bad rc %d:0x%04X on check_change: %s; Assuming no change\n",
- we.rc, we.sub_result, err->msg);
- return 0;
- }
-
- return we.changed ? DISK_EVENT_MEDIA_CHANGE : 0;
-}
-
-static int viocd_lock_door(struct cdrom_device_info *cdi, int locking)
-{
- HvLpEvent_Rc hvrc;
- u64 device_no = DEVICE_NR((struct disk_info *)cdi->handle);
- /* NOTE: flags is 1 or 0 so it won't overwrite the device_no */
- u64 flags = !!locking;
- struct viocd_waitevent we;
-
- init_completion(&we.com);
-
- /* Send the lockdoor event to OS/400 */
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_cdio | viocdlockdoor,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)&we, VIOVERSION << 16,
- (device_no << 48) | (flags << 32), 0, 0, 0);
- if (hvrc != 0) {
- pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
- (int)hvrc);
- return -EIO;
- }
-
- wait_for_completion(&we.com);
-
- if (we.rc != 0)
- return -EIO;
- return 0;
-}
-
-static int viocd_packet(struct cdrom_device_info *cdi,
- struct packet_command *cgc)
-{
- unsigned int buflen = cgc->buflen;
- int ret = -EIO;
-
- switch (cgc->cmd[0]) {
- case GPCMD_READ_DISC_INFO:
- {
- disc_information *di = (disc_information *)cgc->buffer;
-
- if (buflen >= 2) {
- di->disc_information_length = cpu_to_be16(1);
- ret = 0;
- }
- if (buflen >= 3)
- di->erasable =
- (cdi->ops->capability & ~cdi->mask
- & (CDC_DVD_RAM | CDC_RAM)) != 0;
- }
- break;
- case GPCMD_GET_CONFIGURATION:
- if (cgc->cmd[3] == CDF_RWRT) {
- struct rwrt_feature_desc *rfd = (struct rwrt_feature_desc *)(cgc->buffer + sizeof(struct feature_header));
-
- if ((buflen >=
- (sizeof(struct feature_header) + sizeof(*rfd))) &&
- (cdi->ops->capability & ~cdi->mask
- & (CDC_DVD_RAM | CDC_RAM))) {
- rfd->feature_code = cpu_to_be16(CDF_RWRT);
- rfd->curr = 1;
- ret = 0;
- }
- }
- break;
- default:
- if (cgc->sense) {
- /* indicate Unknown code */
- cgc->sense->sense_key = 0x05;
- cgc->sense->asc = 0x20;
- cgc->sense->ascq = 0x00;
- }
- break;
- }
-
- cgc->stat = ret;
- return ret;
-}
-
-static void restart_all_queues(int first_index)
-{
- int i;
-
- for (i = first_index + 1; i < viocd_numdev; i++)
- if (viocd_diskinfo[i].viocd_disk)
- blk_run_queue(viocd_diskinfo[i].viocd_disk->queue);
- for (i = 0; i <= first_index; i++)
- if (viocd_diskinfo[i].viocd_disk)
- blk_run_queue(viocd_diskinfo[i].viocd_disk->queue);
-}
-
-/* This routine handles incoming CD LP events */
-static void vio_handle_cd_event(struct HvLpEvent *event)
-{
- struct viocdlpevent *bevent;
- struct viocd_waitevent *pwe;
- struct disk_info *di;
- unsigned long flags;
- struct request *req;
-
-
- if (event == NULL)
- /* Notification that a partition went away! */
- return;
- /* First, we should NEVER get an int here...only acks */
- if (hvlpevent_is_int(event)) {
- pr_warning("Yikes! got an int in viocd event handler!\n");
- if (hvlpevent_need_ack(event)) {
- event->xRc = HvLpEvent_Rc_InvalidSubtype;
- HvCallEvent_ackLpEvent(event);
- }
- }
-
- bevent = (struct viocdlpevent *)event;
-
- switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
- case viocdopen:
- if (event->xRc == 0) {
- di = &viocd_diskinfo[bevent->disk];
- blk_queue_logical_block_size(di->viocd_disk->queue,
- bevent->block_size);
- set_capacity(di->viocd_disk,
- bevent->media_size *
- bevent->block_size / 512);
- }
- /* FALLTHROUGH !! */
- case viocdlockdoor:
- pwe = (struct viocd_waitevent *)event->xCorrelationToken;
-return_complete:
- pwe->rc = event->xRc;
- pwe->sub_result = bevent->sub_result;
- complete(&pwe->com);
- break;
-
- case viocdcheck:
- pwe = (struct viocd_waitevent *)event->xCorrelationToken;
- pwe->changed = bevent->flags;
- goto return_complete;
-
- case viocdclose:
- break;
-
- case viocdwrite:
- case viocdread:
- /*
- * Since this is running in interrupt mode, we need to
- * make sure we're not stepping on any global I/O operations
- */
- di = &viocd_diskinfo[bevent->disk];
- spin_lock_irqsave(&viocd_reqlock, flags);
- dma_unmap_single(di->dev, bevent->token, bevent->len,
- ((event->xSubtype & VIOMINOR_SUBTYPE_MASK) == viocdread)
- ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
- req = (struct request *)bevent->event.xCorrelationToken;
- rwreq--;
-
- if (event->xRc != HvLpEvent_Rc_Good) {
- const struct vio_error_entry *err =
- vio_lookup_rc(viocd_err_table,
- bevent->sub_result);
- pr_warning("request %p failed with rc %d:0x%04X: %s\n",
- req, event->xRc,
- bevent->sub_result, err->msg);
- __blk_end_request_all(req, -EIO);
- } else
- __blk_end_request_all(req, 0);
-
- /* restart handling of incoming requests */
- spin_unlock_irqrestore(&viocd_reqlock, flags);
- restart_all_queues(bevent->disk);
- break;
-
- default:
- pr_warning("message with invalid subtype %0x04X!\n",
- event->xSubtype & VIOMINOR_SUBTYPE_MASK);
- if (hvlpevent_need_ack(event)) {
- event->xRc = HvLpEvent_Rc_InvalidSubtype;
- HvCallEvent_ackLpEvent(event);
- }
- }
-}
-
-static int viocd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
- void *arg)
-{
- return -EINVAL;
-}
-
-static struct cdrom_device_ops viocd_dops = {
- .open = viocd_open,
- .release = viocd_release,
- .check_events = viocd_check_events,
- .lock_door = viocd_lock_door,
- .generic_packet = viocd_packet,
- .audio_ioctl = viocd_audio_ioctl,
- .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM
-};
-
-static int find_capability(const char *type)
-{
- struct capability_entry *entry;
-
- for(entry = capability_table; entry->type; ++entry)
- if(!strncmp(entry->type, type, 4))
- break;
- return entry->capability;
-}
-
-static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
-{
- struct gendisk *gendisk;
- int deviceno;
- struct disk_info *d;
- struct cdrom_device_info *c;
- struct request_queue *q;
- struct device_node *node = vdev->dev.of_node;
-
- deviceno = vdev->unit_address;
- if (deviceno >= VIOCD_MAX_CD)
- return -ENODEV;
- if (!node)
- return -ENODEV;
-
- if (deviceno >= viocd_numdev)
- viocd_numdev = deviceno + 1;
-
- d = &viocd_diskinfo[deviceno];
- d->rsrcname = of_get_property(node, "linux,vio_rsrcname", NULL);
- d->type = of_get_property(node, "linux,vio_type", NULL);
- d->model = of_get_property(node, "linux,vio_model", NULL);
-
- c = &d->viocd_info;
-
- c->ops = &viocd_dops;
- c->speed = 4;
- c->capacity = 1;
- c->handle = d;
- c->mask = ~find_capability(d->type);
- sprintf(c->name, VIOCD_DEVICE "%c", 'a' + deviceno);
-
- if (register_cdrom(c) != 0) {
- pr_warning("Cannot register viocd CD-ROM %s!\n", c->name);
- goto out;
- }
- pr_info("cd %s is iSeries resource %10.10s type %4.4s, model %3.3s\n",
- c->name, d->rsrcname, d->type, d->model);
- q = blk_init_queue(do_viocd_request, &viocd_reqlock);
- if (q == NULL) {
- pr_warning("Cannot allocate queue for %s!\n", c->name);
- goto out_unregister_cdrom;
- }
- gendisk = alloc_disk(1);
- if (gendisk == NULL) {
- pr_warning("Cannot create gendisk for %s!\n", c->name);
- goto out_cleanup_queue;
- }
- gendisk->major = VIOCD_MAJOR;
- gendisk->first_minor = deviceno;
- strncpy(gendisk->disk_name, c->name,
- sizeof(gendisk->disk_name));
- blk_queue_max_segments(q, 1);
- blk_queue_max_hw_sectors(q, 4096 / 512);
- gendisk->queue = q;
- gendisk->fops = &viocd_fops;
- gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
- GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- set_capacity(gendisk, 0);
- gendisk->private_data = d;
- d->viocd_disk = gendisk;
- d->dev = &vdev->dev;
- gendisk->driverfs_dev = d->dev;
- add_disk(gendisk);
- return 0;
-
-out_cleanup_queue:
- blk_cleanup_queue(q);
-out_unregister_cdrom:
- unregister_cdrom(c);
-out:
- return -ENODEV;
-}
-
-static int viocd_remove(struct vio_dev *vdev)
-{
- struct disk_info *d = &viocd_diskinfo[vdev->unit_address];
-
- unregister_cdrom(&d->viocd_info);
- del_gendisk(d->viocd_disk);
- blk_cleanup_queue(d->viocd_disk->queue);
- put_disk(d->viocd_disk);
- return 0;
-}
-
-/**
- * viocd_device_table: Used by vio.c to match devices that we
- * support.
- */
-static struct vio_device_id viocd_device_table[] __devinitdata = {
- { "block", "IBM,iSeries-viocd" },
- { "", "" }
-};
-MODULE_DEVICE_TABLE(vio, viocd_device_table);
-
-static struct vio_driver viocd_driver = {
- .id_table = viocd_device_table,
- .probe = viocd_probe,
- .remove = viocd_remove,
- .driver = {
- .name = "viocd",
- .owner = THIS_MODULE,
- }
-};
-
-static int __init viocd_init(void)
-{
- int ret = 0;
-
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return -ENODEV;
-
- if (viopath_hostLp == HvLpIndexInvalid) {
- vio_set_hostlp();
- /* If we don't have a host, bail out */
- if (viopath_hostLp == HvLpIndexInvalid)
- return -ENODEV;
- }
-
- pr_info("vers " VIOCD_VERS ", hosting partition %d\n", viopath_hostLp);
-
- if (register_blkdev(VIOCD_MAJOR, VIOCD_DEVICE) != 0) {
- pr_warning("Unable to get major %d for %s\n",
- VIOCD_MAJOR, VIOCD_DEVICE);
- return -EIO;
- }
-
- ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio,
- MAX_CD_REQ + 2);
- if (ret) {
- pr_warning("error opening path to host partition %d\n",
- viopath_hostLp);
- goto out_unregister;
- }
-
- /* Initialize our request handler */
- vio_setHandler(viomajorsubtype_cdio, vio_handle_cd_event);
-
- spin_lock_init(&viocd_reqlock);
-
- ret = vio_register_driver(&viocd_driver);
- if (ret)
- goto out_free_info;
-
- proc_create("iSeries/viocd", S_IFREG|S_IRUGO, NULL,
- &proc_viocd_operations);
- return 0;
-
-out_free_info:
- vio_clearHandler(viomajorsubtype_cdio);
- viopath_close(viopath_hostLp, viomajorsubtype_cdio, MAX_CD_REQ + 2);
-out_unregister:
- unregister_blkdev(VIOCD_MAJOR, VIOCD_DEVICE);
- return ret;
-}
-
-static void __exit viocd_exit(void)
-{
- remove_proc_entry("iSeries/viocd", NULL);
- vio_unregister_driver(&viocd_driver);
- viopath_close(viopath_hostLp, viomajorsubtype_cdio, MAX_CD_REQ + 2);
- vio_clearHandler(viomajorsubtype_cdio);
- unregister_blkdev(VIOCD_MAJOR, VIOCD_DEVICE);
-}
-
-module_init(viocd_init);
-module_exit(viocd_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
deleted file mode 100644
index 8b34c65..0000000
--- a/drivers/char/viotape.c
+++ /dev/null
@@ -1,1041 +0,0 @@
-/* -*- linux-c -*-
- * drivers/char/viotape.c
- *
- * iSeries Virtual Tape
- *
- * Authors: Dave Boutcher <boutcher@us.ibm.com>
- * Ryan Arnold <ryanarn@us.ibm.com>
- * Colin Devilbiss <devilbis@us.ibm.com>
- * Stephen Rothwell
- *
- * (C) Copyright 2000-2004 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) anyu later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This routine provides access to tape drives owned and managed by an OS/400
- * partition running on the same box as this Linux partition.
- *
- * All tape operations are performed by sending messages back and forth to
- * the OS/400 partition. The format of the messages is defined in
- * iseries/vio.h
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/mtio.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/major.h>
-#include <linux/completion.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-
-#include <asm/uaccess.h>
-#include <asm/ioctls.h>
-#include <asm/firmware.h>
-#include <asm/vio.h>
-#include <asm/iseries/vio.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iseries/hv_call_event.h>
-#include <asm/iseries/hv_lp_config.h>
-
-#define VIOTAPE_VERSION "1.2"
-#define VIOTAPE_MAXREQ 1
-
-#define VIOTAPE_KERN_WARN KERN_WARNING "viotape: "
-#define VIOTAPE_KERN_INFO KERN_INFO "viotape: "
-
-static DEFINE_MUTEX(proc_viotape_mutex);
-static int viotape_numdev;
-
-/*
- * The minor number follows the conventions of the SCSI tape drives. The
- * rewind and mode are encoded in the minor #. We use this struct to break
- * them out
- */
-struct viot_devinfo_struct {
- int devno;
- int mode;
- int rewind;
-};
-
-#define VIOTAPOP_RESET 0
-#define VIOTAPOP_FSF 1
-#define VIOTAPOP_BSF 2
-#define VIOTAPOP_FSR 3
-#define VIOTAPOP_BSR 4
-#define VIOTAPOP_WEOF 5
-#define VIOTAPOP_REW 6
-#define VIOTAPOP_NOP 7
-#define VIOTAPOP_EOM 8
-#define VIOTAPOP_ERASE 9
-#define VIOTAPOP_SETBLK 10
-#define VIOTAPOP_SETDENSITY 11
-#define VIOTAPOP_SETPOS 12
-#define VIOTAPOP_GETPOS 13
-#define VIOTAPOP_SETPART 14
-#define VIOTAPOP_UNLOAD 15
-
-enum viotaperc {
- viotape_InvalidRange = 0x0601,
- viotape_InvalidToken = 0x0602,
- viotape_DMAError = 0x0603,
- viotape_UseError = 0x0604,
- viotape_ReleaseError = 0x0605,
- viotape_InvalidTape = 0x0606,
- viotape_InvalidOp = 0x0607,
- viotape_TapeErr = 0x0608,
-
- viotape_AllocTimedOut = 0x0640,
- viotape_BOTEnc = 0x0641,
- viotape_BlankTape = 0x0642,
- viotape_BufferEmpty = 0x0643,
- viotape_CleanCartFound = 0x0644,
- viotape_CmdNotAllowed = 0x0645,
- viotape_CmdNotSupported = 0x0646,
- viotape_DataCheck = 0x0647,
- viotape_DecompressErr = 0x0648,
- viotape_DeviceTimeout = 0x0649,
- viotape_DeviceUnavail = 0x064a,
- viotape_DeviceBusy = 0x064b,
- viotape_EndOfMedia = 0x064c,
- viotape_EndOfTape = 0x064d,
- viotape_EquipCheck = 0x064e,
- viotape_InsufficientRs = 0x064f,
- viotape_InvalidLogBlk = 0x0650,
- viotape_LengthError = 0x0651,
- viotape_LibDoorOpen = 0x0652,
- viotape_LoadFailure = 0x0653,
- viotape_NotCapable = 0x0654,
- viotape_NotOperational = 0x0655,
- viotape_NotReady = 0x0656,
- viotape_OpCancelled = 0x0657,
- viotape_PhyLinkErr = 0x0658,
- viotape_RdyNotBOT = 0x0659,
- viotape_TapeMark = 0x065a,
- viotape_WriteProt = 0x065b
-};
-
-static const struct vio_error_entry viotape_err_table[] = {
- { viotape_InvalidRange, EIO, "Internal error" },
- { viotape_InvalidToken, EIO, "Internal error" },
- { viotape_DMAError, EIO, "DMA error" },
- { viotape_UseError, EIO, "Internal error" },
- { viotape_ReleaseError, EIO, "Internal error" },
- { viotape_InvalidTape, EIO, "Invalid tape device" },
- { viotape_InvalidOp, EIO, "Invalid operation" },
- { viotape_TapeErr, EIO, "Tape error" },
- { viotape_AllocTimedOut, EBUSY, "Allocate timed out" },
- { viotape_BOTEnc, EIO, "Beginning of tape encountered" },
- { viotape_BlankTape, EIO, "Blank tape" },
- { viotape_BufferEmpty, EIO, "Buffer empty" },
- { viotape_CleanCartFound, ENOMEDIUM, "Cleaning cartridge found" },
- { viotape_CmdNotAllowed, EIO, "Command not allowed" },
- { viotape_CmdNotSupported, EIO, "Command not supported" },
- { viotape_DataCheck, EIO, "Data check" },
- { viotape_DecompressErr, EIO, "Decompression error" },
- { viotape_DeviceTimeout, EBUSY, "Device timeout" },
- { viotape_DeviceUnavail, EIO, "Device unavailable" },
- { viotape_DeviceBusy, EBUSY, "Device busy" },
- { viotape_EndOfMedia, ENOSPC, "End of media" },
- { viotape_EndOfTape, ENOSPC, "End of tape" },
- { viotape_EquipCheck, EIO, "Equipment check" },
- { viotape_InsufficientRs, EOVERFLOW, "Insufficient tape resources" },
- { viotape_InvalidLogBlk, EIO, "Invalid logical block location" },
- { viotape_LengthError, EOVERFLOW, "Length error" },
- { viotape_LibDoorOpen, EBUSY, "Door open" },
- { viotape_LoadFailure, ENOMEDIUM, "Load failure" },
- { viotape_NotCapable, EIO, "Not capable" },
- { viotape_NotOperational, EIO, "Not operational" },
- { viotape_NotReady, EIO, "Not ready" },
- { viotape_OpCancelled, EIO, "Operation cancelled" },
- { viotape_PhyLinkErr, EIO, "Physical link error" },
- { viotape_RdyNotBOT, EIO, "Ready but not beginning of tape" },
- { viotape_TapeMark, EIO, "Tape mark" },
- { viotape_WriteProt, EROFS, "Write protection error" },
- { 0, 0, NULL },
-};
-
-/* Maximum number of tapes we support */
-#define VIOTAPE_MAX_TAPE HVMAXARCHITECTEDVIRTUALTAPES
-#define MAX_PARTITIONS 4
-
-/* defines for current tape state */
-#define VIOT_IDLE 0
-#define VIOT_READING 1
-#define VIOT_WRITING 2
-
-/* Our info on the tapes */
-static struct {
- const char *rsrcname;
- const char *type;
- const char *model;
-} viotape_unitinfo[VIOTAPE_MAX_TAPE];
-
-static struct mtget viomtget[VIOTAPE_MAX_TAPE];
-
-static struct class *tape_class;
-
-static struct device *tape_device[VIOTAPE_MAX_TAPE];
-
-/*
- * maintain the current state of each tape (and partition)
- * so that we know when to write EOF marks.
- */
-static struct {
- unsigned char cur_part;
- unsigned char part_stat_rwi[MAX_PARTITIONS];
-} state[VIOTAPE_MAX_TAPE];
-
-/* We single-thread */
-static struct semaphore reqSem;
-
-/*
- * When we send a request, we use this struct to get the response back
- * from the interrupt handler
- */
-struct op_struct {
- void *buffer;
- dma_addr_t dmaaddr;
- size_t count;
- int rc;
- int non_blocking;
- struct completion com;
- struct device *dev;
- struct op_struct *next;
-};
-
-static spinlock_t op_struct_list_lock;
-static struct op_struct *op_struct_list;
-
-/* forward declaration to resolve interdependence */
-static int chg_state(int index, unsigned char new_state, struct file *file);
-
-/* procfs support */
-static int proc_viotape_show(struct seq_file *m, void *v)
-{
- int i;
-
- seq_printf(m, "viotape driver version " VIOTAPE_VERSION "\n");
- for (i = 0; i < viotape_numdev; i++) {
- seq_printf(m, "viotape device %d is iSeries resource %10.10s"
- "type %4.4s, model %3.3s\n",
- i, viotape_unitinfo[i].rsrcname,
- viotape_unitinfo[i].type,
- viotape_unitinfo[i].model);
- }
- return 0;
-}
-
-static int proc_viotape_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_viotape_show, NULL);
-}
-
-static const struct file_operations proc_viotape_operations = {
- .owner = THIS_MODULE,
- .open = proc_viotape_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/* Decode the device minor number into its parts */
-void get_dev_info(struct inode *ino, struct viot_devinfo_struct *devi)
-{
- devi->devno = iminor(ino) & 0x1F;
- devi->mode = (iminor(ino) & 0x60) >> 5;
- /* if bit is set in the minor, do _not_ rewind automatically */
- devi->rewind = (iminor(ino) & 0x80) == 0;
-}
-
-/* This is called only from the exit and init paths, so no need for locking */
-static void clear_op_struct_pool(void)
-{
- while (op_struct_list) {
- struct op_struct *toFree = op_struct_list;
- op_struct_list = op_struct_list->next;
- kfree(toFree);
- }
-}
-
-/* Likewise, this is only called from the init path */
-static int add_op_structs(int structs)
-{
- int i;
-
- for (i = 0; i < structs; ++i) {
- struct op_struct *new_struct =
- kmalloc(sizeof(*new_struct), GFP_KERNEL);
- if (!new_struct) {
- clear_op_struct_pool();
- return -ENOMEM;
- }
- new_struct->next = op_struct_list;
- op_struct_list = new_struct;
- }
- return 0;
-}
-
-/* Allocate an op structure from our pool */
-static struct op_struct *get_op_struct(void)
-{
- struct op_struct *retval;
- unsigned long flags;
-
- spin_lock_irqsave(&op_struct_list_lock, flags);
- retval = op_struct_list;
- if (retval)
- op_struct_list = retval->next;
- spin_unlock_irqrestore(&op_struct_list_lock, flags);
- if (retval) {
- memset(retval, 0, sizeof(*retval));
- init_completion(&retval->com);
- }
-
- return retval;
-}
-
-/* Return an op structure to our pool */
-static void free_op_struct(struct op_struct *op_struct)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&op_struct_list_lock, flags);
- op_struct->next = op_struct_list;
- op_struct_list = op_struct;
- spin_unlock_irqrestore(&op_struct_list_lock, flags);
-}
-
-/* Map our tape return codes to errno values */
-int tape_rc_to_errno(int tape_rc, char *operation, int tapeno)
-{
- const struct vio_error_entry *err;
-
- if (tape_rc == 0)
- return 0;
-
- err = vio_lookup_rc(viotape_err_table, tape_rc);
- printk(VIOTAPE_KERN_WARN "error(%s) 0x%04x on Device %d (%-10s): %s\n",
- operation, tape_rc, tapeno,
- viotape_unitinfo[tapeno].rsrcname, err->msg);
- return -err->errno;
-}
-
-/* Write */
-static ssize_t viotap_write(struct file *file, const char *buf,
- size_t count, loff_t * ppos)
-{
- HvLpEvent_Rc hvrc;
- unsigned short flags = file->f_flags;
- int noblock = ((flags & O_NONBLOCK) != 0);
- ssize_t ret;
- struct viot_devinfo_struct devi;
- struct op_struct *op = get_op_struct();
-
- if (op == NULL)
- return -ENOMEM;
-
- get_dev_info(file->f_path.dentry->d_inode, &devi);
-
- /*
- * We need to make sure we can send a request. We use
- * a semaphore to keep track of # requests in use. If
- * we are non-blocking, make sure we don't block on the
- * semaphore
- */
- if (noblock) {
- if (down_trylock(&reqSem)) {
- ret = -EWOULDBLOCK;
- goto free_op;
- }
- } else
- down(&reqSem);
-
- /* Allocate a DMA buffer */
- op->dev = tape_device[devi.devno];
- op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr,
- GFP_ATOMIC);
-
- if (op->buffer == NULL) {
- printk(VIOTAPE_KERN_WARN
- "error allocating dma buffer for len %ld\n",
- count);
- ret = -EFAULT;
- goto up_sem;
- }
-
- /* Copy the data into the buffer */
- if (copy_from_user(op->buffer, buf, count)) {
- printk(VIOTAPE_KERN_WARN "tape: error on copy from user\n");
- ret = -EFAULT;
- goto free_dma;
- }
-
- op->non_blocking = noblock;
- init_completion(&op->com);
- op->count = count;
-
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotapewrite,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op, VIOVERSION << 16,
- ((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0);
- if (hvrc != HvLpEvent_Rc_Good) {
- printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
- (int)hvrc);
- ret = -EIO;
- goto free_dma;
- }
-
- if (noblock)
- return count;
-
- wait_for_completion(&op->com);
-
- if (op->rc)
- ret = tape_rc_to_errno(op->rc, "write", devi.devno);
- else {
- chg_state(devi.devno, VIOT_WRITING, file);
- ret = op->count;
- }
-
-free_dma:
- dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr);
-up_sem:
- up(&reqSem);
-free_op:
- free_op_struct(op);
- return ret;
-}
-
-/* read */
-static ssize_t viotap_read(struct file *file, char *buf, size_t count,
- loff_t *ptr)
-{
- HvLpEvent_Rc hvrc;
- unsigned short flags = file->f_flags;
- struct op_struct *op = get_op_struct();
- int noblock = ((flags & O_NONBLOCK) != 0);
- ssize_t ret;
- struct viot_devinfo_struct devi;
-
- if (op == NULL)
- return -ENOMEM;
-
- get_dev_info(file->f_path.dentry->d_inode, &devi);
-
- /*
- * We need to make sure we can send a request. We use
- * a semaphore to keep track of # requests in use. If
- * we are non-blocking, make sure we don't block on the
- * semaphore
- */
- if (noblock) {
- if (down_trylock(&reqSem)) {
- ret = -EWOULDBLOCK;
- goto free_op;
- }
- } else
- down(&reqSem);
-
- chg_state(devi.devno, VIOT_READING, file);
-
- /* Allocate a DMA buffer */
- op->dev = tape_device[devi.devno];
- op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr,
- GFP_ATOMIC);
- if (op->buffer == NULL) {
- ret = -EFAULT;
- goto up_sem;
- }
-
- op->count = count;
- init_completion(&op->com);
-
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotaperead,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op, VIOVERSION << 16,
- ((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0);
- if (hvrc != HvLpEvent_Rc_Good) {
- printk(VIOTAPE_KERN_WARN "tape hv error on op %d\n",
- (int)hvrc);
- ret = -EIO;
- goto free_dma;
- }
-
- wait_for_completion(&op->com);
-
- if (op->rc)
- ret = tape_rc_to_errno(op->rc, "read", devi.devno);
- else {
- ret = op->count;
- if (ret && copy_to_user(buf, op->buffer, ret)) {
- printk(VIOTAPE_KERN_WARN "error on copy_to_user\n");
- ret = -EFAULT;
- }
- }
-
-free_dma:
- dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr);
-up_sem:
- up(&reqSem);
-free_op:
- free_op_struct(op);
- return ret;
-}
-
-/* ioctl */
-static int viotap_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- HvLpEvent_Rc hvrc;
- int ret;
- struct viot_devinfo_struct devi;
- struct mtop mtc;
- u32 myOp;
- struct op_struct *op = get_op_struct();
-
- if (op == NULL)
- return -ENOMEM;
-
- get_dev_info(file->f_path.dentry->d_inode, &devi);
-
- down(&reqSem);
-
- ret = -EINVAL;
-
- switch (cmd) {
- case MTIOCTOP:
- ret = -EFAULT;
- /*
- * inode is null if and only if we (the kernel)
- * made the request
- */
- if (inode == NULL)
- memcpy(&mtc, (void *) arg, sizeof(struct mtop));
- else if (copy_from_user((char *)&mtc, (char *)arg,
- sizeof(struct mtop)))
- goto free_op;
-
- ret = -EIO;
- switch (mtc.mt_op) {
- case MTRESET:
- myOp = VIOTAPOP_RESET;
- break;
- case MTFSF:
- myOp = VIOTAPOP_FSF;
- break;
- case MTBSF:
- myOp = VIOTAPOP_BSF;
- break;
- case MTFSR:
- myOp = VIOTAPOP_FSR;
- break;
- case MTBSR:
- myOp = VIOTAPOP_BSR;
- break;
- case MTWEOF:
- myOp = VIOTAPOP_WEOF;
- break;
- case MTREW:
- myOp = VIOTAPOP_REW;
- break;
- case MTNOP:
- myOp = VIOTAPOP_NOP;
- break;
- case MTEOM:
- myOp = VIOTAPOP_EOM;
- break;
- case MTERASE:
- myOp = VIOTAPOP_ERASE;
- break;
- case MTSETBLK:
- myOp = VIOTAPOP_SETBLK;
- break;
- case MTSETDENSITY:
- myOp = VIOTAPOP_SETDENSITY;
- break;
- case MTTELL:
- myOp = VIOTAPOP_GETPOS;
- break;
- case MTSEEK:
- myOp = VIOTAPOP_SETPOS;
- break;
- case MTSETPART:
- myOp = VIOTAPOP_SETPART;
- break;
- case MTOFFL:
- myOp = VIOTAPOP_UNLOAD;
- break;
- default:
- printk(VIOTAPE_KERN_WARN "MTIOCTOP called "
- "with invalid op 0x%x\n", mtc.mt_op);
- goto free_op;
- }
-
- /*
- * if we moved the head, we are no longer
- * reading or writing
- */
- switch (mtc.mt_op) {
- case MTFSF:
- case MTBSF:
- case MTFSR:
- case MTBSR:
- case MTTELL:
- case MTSEEK:
- case MTREW:
- chg_state(devi.devno, VIOT_IDLE, file);
- }
-
- init_completion(&op->com);
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotapeop,
- HvLpEvent_AckInd_DoAck,
- HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op,
- VIOVERSION << 16,
- ((u64)devi.devno << 48), 0,
- (((u64)myOp) << 32) | mtc.mt_count, 0);
- if (hvrc != HvLpEvent_Rc_Good) {
- printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
- (int)hvrc);
- goto free_op;
- }
- wait_for_completion(&op->com);
- ret = tape_rc_to_errno(op->rc, "tape operation", devi.devno);
- goto free_op;
-
- case MTIOCGET:
- ret = -EIO;
- init_completion(&op->com);
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotapegetstatus,
- HvLpEvent_AckInd_DoAck,
- HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op, VIOVERSION << 16,
- ((u64)devi.devno << 48), 0, 0, 0);
- if (hvrc != HvLpEvent_Rc_Good) {
- printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
- (int)hvrc);
- goto free_op;
- }
- wait_for_completion(&op->com);
-
- /* Operation is complete - grab the error code */
- ret = tape_rc_to_errno(op->rc, "get status", devi.devno);
- free_op_struct(op);
- up(&reqSem);
-
- if ((ret == 0) && copy_to_user((void *)arg,
- &viomtget[devi.devno],
- sizeof(viomtget[0])))
- ret = -EFAULT;
- return ret;
- case MTIOCPOS:
- printk(VIOTAPE_KERN_WARN "Got an (unsupported) MTIOCPOS\n");
- break;
- default:
- printk(VIOTAPE_KERN_WARN "got an unsupported ioctl 0x%0x\n",
- cmd);
- break;
- }
-
-free_op:
- free_op_struct(op);
- up(&reqSem);
- return ret;
-}
-
-static long viotap_unlocked_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- long rc;
-
- mutex_lock(&proc_viotape_mutex);
- rc = viotap_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
- mutex_unlock(&proc_viotape_mutex);
- return rc;
-}
-
-static int viotap_open(struct inode *inode, struct file *file)
-{
- HvLpEvent_Rc hvrc;
- struct viot_devinfo_struct devi;
- int ret;
- struct op_struct *op = get_op_struct();
-
- if (op == NULL)
- return -ENOMEM;
-
- mutex_lock(&proc_viotape_mutex);
- get_dev_info(file->f_path.dentry->d_inode, &devi);
-
- /* Note: We currently only support one mode! */
- if ((devi.devno >= viotape_numdev) || (devi.mode)) {
- ret = -ENODEV;
- goto free_op;
- }
-
- init_completion(&op->com);
-
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotapeopen,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op, VIOVERSION << 16,
- ((u64)devi.devno << 48), 0, 0, 0);
- if (hvrc != 0) {
- printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n",
- (int) hvrc);
- ret = -EIO;
- goto free_op;
- }
-
- wait_for_completion(&op->com);
- ret = tape_rc_to_errno(op->rc, "open", devi.devno);
-
-free_op:
- free_op_struct(op);
- mutex_unlock(&proc_viotape_mutex);
- return ret;
-}
-
-
-static int viotap_release(struct inode *inode, struct file *file)
-{
- HvLpEvent_Rc hvrc;
- struct viot_devinfo_struct devi;
- int ret = 0;
- struct op_struct *op = get_op_struct();
-
- if (op == NULL)
- return -ENOMEM;
- init_completion(&op->com);
-
- get_dev_info(file->f_path.dentry->d_inode, &devi);
-
- if (devi.devno >= viotape_numdev) {
- ret = -ENODEV;
- goto free_op;
- }
-
- chg_state(devi.devno, VIOT_IDLE, file);
-
- if (devi.rewind) {
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotapeop,
- HvLpEvent_AckInd_DoAck,
- HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op, VIOVERSION << 16,
- ((u64)devi.devno << 48), 0,
- ((u64)VIOTAPOP_REW) << 32, 0);
- wait_for_completion(&op->com);
-
- tape_rc_to_errno(op->rc, "rewind", devi.devno);
- }
-
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotapeclose,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)op, VIOVERSION << 16,
- ((u64)devi.devno << 48), 0, 0, 0);
- if (hvrc != 0) {
- printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n",
- (int) hvrc);
- ret = -EIO;
- goto free_op;
- }
-
- wait_for_completion(&op->com);
-
- if (op->rc)
- printk(VIOTAPE_KERN_WARN "close failed\n");
-
-free_op:
- free_op_struct(op);
- return ret;
-}
-
-const struct file_operations viotap_fops = {
- .owner = THIS_MODULE,
- .read = viotap_read,
- .write = viotap_write,
- .unlocked_ioctl = viotap_unlocked_ioctl,
- .open = viotap_open,
- .release = viotap_release,
- .llseek = noop_llseek,
-};
-
-/* Handle interrupt events for tape */
-static void vioHandleTapeEvent(struct HvLpEvent *event)
-{
- int tapeminor;
- struct op_struct *op;
- struct viotapelpevent *tevent = (struct viotapelpevent *)event;
-
- if (event == NULL) {
- /* Notification that a partition went away! */
- if (!viopath_isactive(viopath_hostLp)) {
- /* TODO! Clean up */
- }
- return;
- }
-
- tapeminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK;
- op = (struct op_struct *)event->xCorrelationToken;
- switch (tapeminor) {
- case viotapeopen:
- case viotapeclose:
- op->rc = tevent->sub_type_result;
- complete(&op->com);
- break;
- case viotaperead:
- op->rc = tevent->sub_type_result;
- op->count = tevent->len;
- complete(&op->com);
- break;
- case viotapewrite:
- if (op->non_blocking) {
- dma_free_coherent(op->dev, op->count,
- op->buffer, op->dmaaddr);
- free_op_struct(op);
- up(&reqSem);
- } else {
- op->rc = tevent->sub_type_result;
- op->count = tevent->len;
- complete(&op->com);
- }
- break;
- case viotapeop:
- case viotapegetpos:
- case viotapesetpos:
- case viotapegetstatus:
- if (op) {
- op->count = tevent->u.op.count;
- op->rc = tevent->sub_type_result;
- if (!op->non_blocking)
- complete(&op->com);
- }
- break;
- default:
- printk(VIOTAPE_KERN_WARN "weird ack\n");
- }
-}
-
-static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
-{
- int i = vdev->unit_address;
- int j;
- struct device_node *node = vdev->dev.of_node;
-
- if (i >= VIOTAPE_MAX_TAPE)
- return -ENODEV;
- if (!node)
- return -ENODEV;
-
- if (i >= viotape_numdev)
- viotape_numdev = i + 1;
-
- tape_device[i] = &vdev->dev;
- viotape_unitinfo[i].rsrcname = of_get_property(node,
- "linux,vio_rsrcname", NULL);
- viotape_unitinfo[i].type = of_get_property(node, "linux,vio_type",
- NULL);
- viotape_unitinfo[i].model = of_get_property(node, "linux,vio_model",
- NULL);
-
- state[i].cur_part = 0;
- for (j = 0; j < MAX_PARTITIONS; ++j)
- state[i].part_stat_rwi[j] = VIOT_IDLE;
- device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i), NULL,
- "iseries!vt%d", i);
- device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80), NULL,
- "iseries!nvt%d", i);
- printk(VIOTAPE_KERN_INFO "tape iseries/vt%d is iSeries "
- "resource %10.10s type %4.4s, model %3.3s\n",
- i, viotape_unitinfo[i].rsrcname,
- viotape_unitinfo[i].type, viotape_unitinfo[i].model);
- return 0;
-}
-
-static int viotape_remove(struct vio_dev *vdev)
-{
- int i = vdev->unit_address;
-
- device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i | 0x80));
- device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i));
- return 0;
-}
-
-/**
- * viotape_device_table: Used by vio.c to match devices that we
- * support.
- */
-static struct vio_device_id viotape_device_table[] __devinitdata = {
- { "byte", "IBM,iSeries-viotape" },
- { "", "" }
-};
-MODULE_DEVICE_TABLE(vio, viotape_device_table);
-
-static struct vio_driver viotape_driver = {
- .id_table = viotape_device_table,
- .probe = viotape_probe,
- .remove = viotape_remove,
- .driver = {
- .name = "viotape",
- .owner = THIS_MODULE,
- }
-};
-
-
-int __init viotap_init(void)
-{
- int ret;
-
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return -ENODEV;
-
- op_struct_list = NULL;
- if ((ret = add_op_structs(VIOTAPE_MAXREQ)) < 0) {
- printk(VIOTAPE_KERN_WARN "couldn't allocate op structs\n");
- return ret;
- }
- spin_lock_init(&op_struct_list_lock);
-
- sema_init(&reqSem, VIOTAPE_MAXREQ);
-
- if (viopath_hostLp == HvLpIndexInvalid) {
- vio_set_hostlp();
- if (viopath_hostLp == HvLpIndexInvalid) {
- ret = -ENODEV;
- goto clear_op;
- }
- }
-
- ret = viopath_open(viopath_hostLp, viomajorsubtype_tape,
- VIOTAPE_MAXREQ + 2);
- if (ret) {
- printk(VIOTAPE_KERN_WARN
- "error on viopath_open to hostlp %d\n", ret);
- ret = -EIO;
- goto clear_op;
- }
-
- printk(VIOTAPE_KERN_INFO "vers " VIOTAPE_VERSION
- ", hosting partition %d\n", viopath_hostLp);
-
- vio_setHandler(viomajorsubtype_tape, vioHandleTapeEvent);
-
- ret = register_chrdev(VIOTAPE_MAJOR, "viotape", &viotap_fops);
- if (ret < 0) {
- printk(VIOTAPE_KERN_WARN "Error registering viotape device\n");
- goto clear_handler;
- }
-
- tape_class = class_create(THIS_MODULE, "tape");
- if (IS_ERR(tape_class)) {
- printk(VIOTAPE_KERN_WARN "Unable to allocate class\n");
- ret = PTR_ERR(tape_class);
- goto unreg_chrdev;
- }
-
- ret = vio_register_driver(&viotape_driver);
- if (ret)
- goto unreg_class;
-
- proc_create("iSeries/viotape", S_IFREG|S_IRUGO, NULL,
- &proc_viotape_operations);
-
- return 0;
-
-unreg_class:
- class_destroy(tape_class);
-unreg_chrdev:
- unregister_chrdev(VIOTAPE_MAJOR, "viotape");
-clear_handler:
- vio_clearHandler(viomajorsubtype_tape);
- viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2);
-clear_op:
- clear_op_struct_pool();
- return ret;
-}
-
-/* Give a new state to the tape object */
-static int chg_state(int index, unsigned char new_state, struct file *file)
-{
- unsigned char *cur_state =
- &state[index].part_stat_rwi[state[index].cur_part];
- int rc = 0;
-
- /* if the same state, don't bother */
- if (*cur_state == new_state)
- return 0;
-
- /* write an EOF if changing from writing to some other state */
- if (*cur_state == VIOT_WRITING) {
- struct mtop write_eof = { MTWEOF, 1 };
-
- rc = viotap_ioctl(NULL, file, MTIOCTOP,
- (unsigned long)&write_eof);
- }
- *cur_state = new_state;
- return rc;
-}
-
-/* Cleanup */
-static void __exit viotap_exit(void)
-{
- remove_proc_entry("iSeries/viotape", NULL);
- vio_unregister_driver(&viotape_driver);
- class_destroy(tape_class);
- unregister_chrdev(VIOTAPE_MAJOR, "viotape");
- viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2);
- vio_clearHandler(viomajorsubtype_tape);
- clear_op_struct_pool();
-}
-
-MODULE_LICENSE("GPL");
-module_init(viotap_init);
-module_exit(viotap_exit);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d0c4118..0409cf3 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -190,6 +190,17 @@ config GPIO_VX855
additional drivers must be enabled in order to use the
functionality of the device.
+config GPIO_GE_FPGA
+ bool "GE FPGA based GPIO"
+ depends on GE_FPGA
+ help
+ Support for common GPIO functionality provided on some GE Single Board
+ Computers.
+
+ This driver provides basic support (configure as input or output, read
+ and write pin state) for GPIO implemented in a number of GE single
+ board computers.
+
comment "I2C GPIO expanders:"
config GPIO_MAX7300
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index fa10df6..9a8fb54 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
+obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
new file mode 100644
index 0000000..7b95a4a
--- /dev/null
+++ b/drivers/gpio/gpio-ge.c
@@ -0,0 +1,199 @@
+/*
+ * Driver for GE FPGA based GPIO
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ *
+ * 2008 (c) GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/* TODO
+ *
+ * Configuration of output modes (totem-pole/open-drain)
+ * Interrupt configuration - interrupts are always generated; the FPGA relies on
+ * the I/O interrupt controller's mask to stop them propagating
+ */
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#define GEF_GPIO_DIRECT 0x00
+#define GEF_GPIO_IN 0x04
+#define GEF_GPIO_OUT 0x08
+#define GEF_GPIO_TRIG 0x0C
+#define GEF_GPIO_POLAR_A 0x10
+#define GEF_GPIO_POLAR_B 0x14
+#define GEF_GPIO_INT_STAT 0x18
+#define GEF_GPIO_OVERRUN 0x1C
+#define GEF_GPIO_MODE 0x20
+
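+/*
+ * Register usage as implemented below: a set bit in GEF_GPIO_DIRECT
+ * configures the corresponding pin as an input and a cleared bit as an
+ * output; pin state is sampled from GEF_GPIO_IN and driven through
+ * GEF_GPIO_OUT.  All accesses are 32-bit big-endian read-modify-write
+ * cycles on the memory-mapped FPGA registers.
+ */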
+static void _gef_gpio_set(void __iomem *reg, unsigned int offset, int value)
+{
+ unsigned int data;
+
+ data = ioread32be(reg);
+ /* value: 0=low; 1=high */
+ if (value & 0x1)
+ data = data | (0x1 << offset);
+ else
+ data = data & ~(0x1 << offset);
+
+ iowrite32be(data, reg);
+}
+
+
+static int gef_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned int data;
+ struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
+
+ data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
+ data = data | (0x1 << offset);
+ iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
+
+ return 0;
+}
+
+static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
+{
+ unsigned int data;
+ struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
+
+ /* Set the output value before switching the pin to output */
+ _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
+
+ data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
+ data = data & ~(0x1 << offset);
+ iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
+
+ return 0;
+}
+
+static int gef_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned int data;
+ int state = 0;
+ struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
+
+ data = ioread32be(mmchip->regs + GEF_GPIO_IN);
+ state = (int)((data >> offset) & 0x1);
+
+ return state;
+}
+
+static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
+
+ _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
+}
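+/*
+ * Usage sketch (not part of this driver): once gef_gpio_init() below has
+ * registered a chip, a consumer can drive a pin through the generic
+ * gpiolib calls.  The GPIO number used here is hypothetical and depends
+ * on the base that gpiolib assigns at registration time.
+ *
+ *	int gpio = 240;				// hypothetical global GPIO number
+ *	if (!gpio_request(gpio, "board-led")) {
+ *		gpio_direction_output(gpio, 1);	// ends up in gef_gpio_dir_out()
+ *		gpio_set_value(gpio, 0);	// ends up in gef_gpio_set()
+ *		gpio_free(gpio);
+ *	}
+ */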
+
+static int __init gef_gpio_init(void)
+{
+ struct device_node *np;
+ int retval;
+ struct of_mm_gpio_chip *gef_gpio_chip;
+
+ for_each_compatible_node(np, NULL, "gef,sbc610-gpio") {
+
+ pr_debug("%s: Initialising GEF GPIO\n", np->full_name);
+
+ /* Allocate chip structure */
+ gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
+ if (!gef_gpio_chip) {
+ pr_err("%s: Unable to allocate structure\n",
+ np->full_name);
+ continue;
+ }
+
+ /* Setup pointers to chip functions */
+ gef_gpio_chip->gc.of_gpio_n_cells = 2;
+ gef_gpio_chip->gc.ngpio = 19;
+ gef_gpio_chip->gc.direction_input = gef_gpio_dir_in;
+ gef_gpio_chip->gc.direction_output = gef_gpio_dir_out;
+ gef_gpio_chip->gc.get = gef_gpio_get;
+ gef_gpio_chip->gc.set = gef_gpio_set;
+
+ /* This function adds a memory mapped GPIO chip */
+ retval = of_mm_gpiochip_add(np, gef_gpio_chip);
+ if (retval) {
+ kfree(gef_gpio_chip);
+ pr_err("%s: Unable to add GPIO\n", np->full_name);
+ }
+ }
+
+ for_each_compatible_node(np, NULL, "gef,sbc310-gpio") {
+
+ pr_debug("%s: Initialising GEF GPIO\n", np->full_name);
+
+ /* Allocate chip structure */
+ gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
+ if (!gef_gpio_chip) {
+ pr_err("%s: Unable to allocate structure\n",
+ np->full_name);
+ continue;
+ }
+
+ /* Setup pointers to chip functions */
+ gef_gpio_chip->gc.of_gpio_n_cells = 2;
+ gef_gpio_chip->gc.ngpio = 6;
+ gef_gpio_chip->gc.direction_input = gef_gpio_dir_in;
+ gef_gpio_chip->gc.direction_output = gef_gpio_dir_out;
+ gef_gpio_chip->gc.get = gef_gpio_get;
+ gef_gpio_chip->gc.set = gef_gpio_set;
+
+ /* This function adds a memory mapped GPIO chip */
+ retval = of_mm_gpiochip_add(np, gef_gpio_chip);
+ if (retval) {
+ kfree(gef_gpio_chip);
+ pr_err("%s: Unable to add GPIO\n", np->full_name);
+ }
+ }
+
+ for_each_compatible_node(np, NULL, "ge,imp3a-gpio") {
+
+ pr_debug("%s: Initialising GE GPIO\n", np->full_name);
+
+ /* Allocate chip structure */
+ gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
+ if (!gef_gpio_chip) {
+ pr_err("%s: Unable to allocate structure\n",
+ np->full_name);
+ continue;
+ }
+
+ /* Setup pointers to chip functions */
+ gef_gpio_chip->gc.of_gpio_n_cells = 2;
+ gef_gpio_chip->gc.ngpio = 16;
+ gef_gpio_chip->gc.direction_input = gef_gpio_dir_in;
+ gef_gpio_chip->gc.direction_output = gef_gpio_dir_out;
+ gef_gpio_chip->gc.get = gef_gpio_get;
+ gef_gpio_chip->gc.set = gef_gpio_set;
+
+ /* This function adds a memory mapped GPIO chip */
+ retval = of_mm_gpiochip_add(np, gef_gpio_chip);
+ if (retval) {
+ kfree(gef_gpio_chip);
+ pr_err("%s: Unable to add GPIO\n", np->full_name);
+ }
+ }
+
+ return 0;
+};
+arch_initcall(gef_gpio_init);
+
+MODULE_DESCRIPTION("GE I/O FPGA GPIO driver");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 366bc15..8c279da 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -560,6 +560,9 @@ static void data_enable_interrupts(struct fpga_device *priv)
/* flush the writes */
fpga_read_reg(priv, 0, MMAP_REG_STATUS);
+ fpga_read_reg(priv, 1, MMAP_REG_STATUS);
+ fpga_read_reg(priv, 2, MMAP_REG_STATUS);
+ fpga_read_reg(priv, 3, MMAP_REG_STATUS);
/* switch back to the external interrupt source */
iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL);
@@ -591,8 +594,12 @@ static void data_dma_cb(void *data)
list_move_tail(&priv->inflight->entry, &priv->used);
priv->inflight = NULL;
- /* clear the FPGA status and re-enable interrupts */
- data_enable_interrupts(priv);
+ /*
+ * If data dumping is still enabled, then clear the FPGA
+ * status registers and re-enable FPGA interrupts
+ */
+ if (priv->enabled)
+ data_enable_interrupts(priv);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -708,6 +715,15 @@ static irqreturn_t data_irq(int irq, void *dev_id)
spin_lock(&priv->lock);
+ /*
+ * This is an error case that should never happen.
+ *
+ * If this driver has a bug and manages to re-enable interrupts while
+ * a DMA is in progress, then we will hit this statement and should
+ * start paying attention immediately.
+ */
+ BUG_ON(priv->inflight != NULL);
+
/* hide the interrupt by switching the IRQ driver to GPIO */
data_disable_interrupts(priv);
@@ -762,11 +778,15 @@ out:
*/
static int data_device_enable(struct fpga_device *priv)
{
+ bool enabled;
u32 val;
int ret;
/* multiple enables are safe: they do nothing */
- if (priv->enabled)
+ spin_lock_irq(&priv->lock);
+ enabled = priv->enabled;
+ spin_unlock_irq(&priv->lock);
+ if (enabled)
return 0;
/* check that the FPGAs are programmed */
@@ -797,6 +817,9 @@ static int data_device_enable(struct fpga_device *priv)
goto out_error;
}
+ /* prevent the FPGAs from generating interrupts */
+ data_disable_interrupts(priv);
+
/* hookup the irq handler */
ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv);
if (ret) {
@@ -804,11 +827,13 @@ static int data_device_enable(struct fpga_device *priv)
goto out_error;
}
- /* switch to the external FPGA IRQ line */
- data_enable_interrupts(priv);
-
- /* success, we're enabled */
+ /* allow the DMA callback to re-enable FPGA interrupts */
+ spin_lock_irq(&priv->lock);
priv->enabled = true;
+ spin_unlock_irq(&priv->lock);
+
+ /* allow the FPGAs to generate interrupts */
+ data_enable_interrupts(priv);
return 0;
out_error:
@@ -834,41 +859,40 @@ out_error:
*/
static int data_device_disable(struct fpga_device *priv)
{
- int ret;
+ spin_lock_irq(&priv->lock);
/* allow multiple disable */
- if (!priv->enabled)
+ if (!priv->enabled) {
+ spin_unlock_irq(&priv->lock);
return 0;
+ }
+
+ /*
+ * Mark the device disabled
+ *
+ * This stops DMA callbacks from re-enabling interrupts
+ */
+ priv->enabled = false;
- /* switch to the internal GPIO IRQ line */
+ /* prevent the FPGAs from generating interrupts */
data_disable_interrupts(priv);
+ /* wait until all ongoing DMA has finished */
+ while (priv->inflight != NULL) {
+ spin_unlock_irq(&priv->lock);
+ wait_event(priv->wait, priv->inflight == NULL);
+ spin_lock_irq(&priv->lock);
+ }
+
+ spin_unlock_irq(&priv->lock);
+
/* unhook the irq handler */
free_irq(priv->irq, priv);
- /*
- * wait for all outstanding DMA to complete
- *
- * Device interrupts are disabled, therefore another buffer cannot
- * be marked inflight.
- */
- ret = wait_event_interruptible(priv->wait, priv->inflight == NULL);
- if (ret)
- return ret;
-
/* free the correlation table */
sg_free_table(&priv->corl_table);
priv->corl_nents = 0;
- /*
- * We are taking the spinlock not to protect priv->enabled, but instead
- * to make sure that there are no readers in the process of altering
- * the free or used lists while we are setting this flag.
- */
- spin_lock_irq(&priv->lock);
- priv->enabled = false;
- spin_unlock_irq(&priv->lock);
-
/* free all buffers: the free and used lists are not being changed */
data_free_buffers(priv);
return 0;
@@ -896,15 +920,6 @@ static unsigned int list_num_entries(struct list_head *list)
static int data_debug_show(struct seq_file *f, void *offset)
{
struct fpga_device *priv = f->private;
- int ret;
-
- /*
- * Lock the mutex first, so that we get an accurate value for enable
- * Lock the spinlock next, to get accurate list counts
- */
- ret = mutex_lock_interruptible(&priv->mutex);
- if (ret)
- return ret;
spin_lock_irq(&priv->lock);
@@ -917,7 +932,6 @@ static int data_debug_show(struct seq_file *f, void *offset)
seq_printf(f, "num_dropped: %d\n", priv->num_dropped);
spin_unlock_irq(&priv->lock);
- mutex_unlock(&priv->mutex);
return 0;
}
@@ -970,7 +984,13 @@ static ssize_t data_en_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct fpga_device *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled);
+ int ret;
+
+ spin_lock_irq(&priv->lock);
+ ret = snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled);
+ spin_unlock_irq(&priv->lock);
+
+ return ret;
}
static ssize_t data_en_set(struct device *dev, struct device_attribute *attr,
@@ -986,6 +1006,7 @@ static ssize_t data_en_set(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
+ /* protect against concurrent enable/disable */
ret = mutex_lock_interruptible(&priv->mutex);
if (ret)
return ret;
@@ -1079,6 +1100,7 @@ static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
struct fpga_reader *reader = filp->private_data;
struct fpga_device *priv = reader->priv;
struct list_head *used = &priv->used;
+ bool drop_buffer = false;
struct data_buf *dbuf;
size_t avail;
void *data;
@@ -1166,10 +1188,12 @@ have_buffer:
* One of two things has happened, the device is disabled, or the
* device has been reconfigured underneath us. In either case, we
* should just throw away the buffer.
+ *
+ * Lockdep complains if this is done under the spinlock, so we
+ * handle it during the unlock path.
*/
if (!priv->enabled || dbuf->size != priv->bufsize) {
- videobuf_dma_unmap(priv->dev, &dbuf->vb);
- data_free_buffer(dbuf);
+ drop_buffer = true;
goto out_unlock;
}
@@ -1178,6 +1202,12 @@ have_buffer:
out_unlock:
spin_unlock_irq(&priv->lock);
+
+ if (drop_buffer) {
+ videobuf_dma_unmap(priv->dev, &dbuf->vb);
+ data_free_buffer(dbuf);
+ }
+
return count;
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 31b034b..3b1d6da 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -462,6 +462,16 @@ config MTD_NAND_FSL_ELBC
Enabling this option will enable you to use this to control
external NAND devices.
+config MTD_NAND_FSL_IFC
+ tristate "NAND support for Freescale IFC controller"
+ depends on MTD_NAND && FSL_SOC
+ select FSL_IFC
+ help
+ Various Freescale chips, e.g. the P1010, include a NAND Flash machine
+ with built-in hardware ECC capabilities.
+ Enabling this option will enable you to use this to control
+ external NAND devices.
+
config MTD_NAND_FSL_UPM
tristate "Support for NAND on Freescale UPM"
depends on PPC_83xx || PPC_85xx
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 618f4ba..19bc8cb 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_MTD_ALAUDA) += alauda.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
new file mode 100644
index 0000000..c30ac7b
--- /dev/null
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -0,0 +1,1072 @@
+/*
+ * Freescale Integrated Flash Controller NAND driver
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc
+ *
+ * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/fsl_ifc.h>
+
+#define ERR_BYTE 0xFF /* Value returned for read
+ bytes when read failed */
+#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
+ for IFC NAND Machine */
+
+struct fsl_ifc_ctrl;
+
+/* mtd information per set */
+struct fsl_ifc_mtd {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct fsl_ifc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ unsigned int bufnum_mask; /* bufnum = page & bufnum_mask */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+};
+
+/* overview of the fsl ifc controller */
+struct fsl_ifc_nand_ctrl {
+ struct nand_hw_control controller;
+ struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
+
+ u8 __iomem *addr; /* Address of assigned IFC buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes;/* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+ unsigned int oob; /* Non zero if operating on OOB data */
+ unsigned int eccread; /* Non zero for a full-page ECC read */
+ unsigned int counter; /* counter for the initializations */
+};
+
+static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
+
+/* 512-byte page with 4-bit ECC, 8-bit */
+static struct nand_ecclayout oob_512_8bit_ecc4 = {
+ .eccbytes = 8,
+ .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
+ .oobfree = { {0, 5}, {6, 2} },
+};
+
+/* 512-byte page with 4-bit ECC, 16-bit */
+static struct nand_ecclayout oob_512_16bit_ecc4 = {
+ .eccbytes = 8,
+ .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
+ .oobfree = { {2, 6}, },
+};
+
+/* 2048-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_2048_ecc4 = {
+ .eccbytes = 32,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ },
+ .oobfree = { {2, 6}, {40, 24} },
+};
+
+/* 4096-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_4096_ecc4 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ },
+ .oobfree = { {2, 6}, {72, 56} },
+};
+
+/* 4096-byte page size with 8-bit ECC -- requires 218-byte OOB */
+static struct nand_ecclayout oob_4096_ecc8 = {
+ .eccbytes = 128,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ },
+ .oobfree = { {2, 6}, {136, 82} },
+};
+
+
+/*
+ * Generic flash bbt descriptors
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
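+/*
+ * The pattern (offs 2, len 4) and version byte (veroffs 6) sit inside the
+ * free OOB bytes reserved by the large-page ECC layouts above (oobfree
+ * starting at {2, 6}), so the flash-based bad block tables do not collide
+ * with the hardware ECC bytes.  For 8-bit small-page devices,
+ * fsl_ifc_chip_init() moves offs to 0 to avoid the bad block marker.
+ */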
+
+/*
+ * Set up the IFC hardware block and page address fields, and the ifc nand
+ * structure addr field to point to the correct IFC buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ int buf_num;
+
+ ifc_nand_ctrl->page = page_addr;
+ /* Program ROW0/COL0 */
+ out_be32(&ifc->ifc_nand.row0, page_addr);
+ out_be32(&ifc->ifc_nand.col0, (oob ? IFC_NAND_COL_MS : 0) | column);
+
+ buf_num = page_addr & priv->bufnum_mask;
+
+ ifc_nand_ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
+ ifc_nand_ctrl->index = column;
+
+ /* for OOB data point to the second half of the buffer */
+ if (oob)
+ ifc_nand_ctrl->index += mtd->writesize;
+}
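+/*
+ * Worked example of the mapping above, assuming a 2048-byte page device
+ * (bufnum_mask = 3): page_addr 5 selects buf_num = 5 & 3 = 1, so the IFC
+ * buffer starts at vbase + 1 * (2048 * 2) = vbase + 4096, and when 'oob'
+ * is set the index is advanced by a further 2048 bytes into the OOB half
+ * of that buffer.
+ */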
+
+static int is_blank(struct mtd_info *mtd, unsigned int bufnum)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
+ u32 __iomem *mainarea = (u32 *)addr;
+ u8 __iomem *oob = addr + mtd->writesize;
+ int i;
+
+ for (i = 0; i < mtd->writesize / 4; i++) {
+ if (__raw_readl(&mainarea[i]) != 0xffffffff)
+ return 0;
+ }
+
+ for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
+ int pos = chip->ecc.layout->eccpos[i];
+
+ if (__raw_readb(&oob[pos]) != 0xff)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* returns the number of ECC errors reported for a buffer (15 = uncorrectable) */
+static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
+ u32 *eccstat, unsigned int bufnum)
+{
+ u32 reg = eccstat[bufnum / 4];
+ int errors;
+
+ errors = (reg >> ((3 - bufnum % 4) * 8)) & 15;
+
+ return errors;
+}
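+/*
+ * Each 32-bit ECCSTAT word packs the status of four ECC sectors, one byte
+ * per sector with the lowest-numbered sector in the most significant byte;
+ * only the low four bits of each byte are used and a value of 15 marks the
+ * sector as uncorrectable.  For example, sector index 5 is byte 2 of
+ * eccstat[1], i.e. (reg >> 16) & 15.
+ */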
+
+/*
+ * execute IFC NAND command and wait for it to complete
+ */
+static void fsl_ifc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ u32 eccstat[4];
+ int i;
+
+ /* set the chip select for NAND Transaction */
+ out_be32(&ifc->ifc_nand.nand_csel, priv->bank << IFC_NAND_CSEL_SHIFT);
+
+ dev_vdbg(priv->dev,
+ "%s: fir0=%08x fcr0=%08x\n",
+ __func__,
+ in_be32(&ifc->ifc_nand.nand_fir0),
+ in_be32(&ifc->ifc_nand.nand_fcr0));
+
+ ctrl->nand_stat = 0;
+
+ /* start read/write seq */
+ out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+ IFC_TIMEOUT_MSECS * HZ/1000);
+
+ /* ctrl->nand_stat will be updated from IRQ context */
+ if (!ctrl->nand_stat)
+ dev_err(priv->dev, "Controller is not responding\n");
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_FTOER)
+ dev_err(priv->dev, "NAND Flash Timeout Error\n");
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
+ dev_err(priv->dev, "NAND Flash Write Protect Error\n");
+
+ if (nctrl->eccread) {
+ int errors;
+ int bufnum = nctrl->page & priv->bufnum_mask;
+ int sector = bufnum * chip->ecc.steps;
+ int sector_end = sector + chip->ecc.steps - 1;
+
+ for (i = sector / 4; i <= sector_end / 4; i++)
+ eccstat[i] = in_be32(&ifc->ifc_nand.nand_eccstat[i]);
+
+ for (i = sector; i <= sector_end; i++) {
+ errors = check_read_ecc(mtd, ctrl, eccstat, i);
+
+ if (errors == 15) {
+ /*
+ * Uncorrectable error.
+ * OK only if the whole page is blank.
+ *
+ * We disable ECCER reporting due to erratum
+ * IFC-A002770, so report it now if we see an
+ * uncorrectable error in ECCSTAT.
+ */
+ if (!is_blank(mtd, bufnum))
+ ctrl->nand_stat |=
+ IFC_NAND_EVTER_STAT_ECCER;
+ break;
+ }
+
+ mtd->ecc_stats.corrected += errors;
+ }
+
+ nctrl->eccread = 0;
+ }
+}
+
+static void fsl_ifc_do_read(struct nand_chip *chip,
+ int oob,
+ struct mtd_info *mtd)
+{
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+
+ /* Program FIR/IFC_NAND_FCR0 for Small/Large page */
+ if (mtd->writesize > 512) {
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
+
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT));
+ } else {
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
+
+ if (oob)
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT);
+ else
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT);
+ }
+}
+
+/* cmdfunc send commands to the IFC NAND Machine */
+static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr) {
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+
+ /* clear the read buffer */
+ ifc_nand_ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ ifc_nand_ctrl->index = 0;
+
+ switch (command) {
+ /* READ0 read the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ0:
+ out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ set_addr(mtd, 0, page_addr, 0);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ ifc_nand_ctrl->index += column;
+
+ if (chip->ecc.mode == NAND_ECC_HW)
+ ifc_nand_ctrl->eccread = 1;
+
+ fsl_ifc_do_read(chip, 0, mtd);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ out_be32(&ifc->ifc_nand.nand_fbcr, mtd->oobsize - column);
+ set_addr(mtd, column, page_addr, 1);
+
+ ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_ifc_do_read(chip, 1, mtd);
+ fsl_ifc_run_command(mtd);
+
+ return;
+
+ /* READID must read all 8 possible bytes */
+ case NAND_CMD_READID:
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
+ /* 8 bytes for manuf, device and exts */
+ out_be32(&ifc->ifc_nand.nand_fbcr, 8);
+ ifc_nand_ctrl->read_bytes = 8;
+
+ set_addr(mtd, 0, 0, 0);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT));
+
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ (NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT));
+
+ out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ ifc_nand_ctrl->read_bytes = 0;
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ u32 nand_fcr0;
+ ifc_nand_ctrl->column = column;
+ ifc_nand_ctrl->oob = 0;
+
+ if (mtd->writesize > 512) {
+ nand_fcr0 =
+ (NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT);
+
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT));
+ } else {
+ nand_fcr0 = ((NAND_CMD_PAGEPROG <<
+ IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN <<
+ IFC_NAND_FCR0_CMD2_SHIFT));
+
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fir1,
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT));
+
+ if (column >= mtd->writesize)
+ nand_fcr0 |=
+ NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
+ else
+ nand_fcr0 |=
+ NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
+ }
+
+ if (column >= mtd->writesize) {
+ /* OOB area --> READOOB */
+ column -= mtd->writesize;
+ ifc_nand_ctrl->oob = 1;
+ }
+ out_be32(&ifc->ifc_nand.nand_fcr0, nand_fcr0);
+ set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
+ if (ifc_nand_ctrl->oob) {
+ out_be32(&ifc->ifc_nand.nand_fbcr,
+ ifc_nand_ctrl->index - ifc_nand_ctrl->column);
+ } else {
+ out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ }
+
+ fsl_ifc_run_command(mtd);
+ return;
+ }
+
+ case NAND_CMD_STATUS:
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT);
+ out_be32(&ifc->ifc_nand.nand_fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+ return;
+
+ case NAND_CMD_RESET:
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT);
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ default:
+ dev_err(priv->dev, "%s: error, unsupported command 0x%x.\n",
+ __func__, command);
+ }
+}
+
+static void fsl_ifc_select_chip(struct mtd_info *mtd, int chip)
+{
+ /* The hardware does not seem to support multiple
+ * chips per bank.
+ */
+}
+
+/*
+ * Write buf to the IFC NAND Controller Data Buffer
+ */
+static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len <= 0) {
+ dev_err(priv->dev, "%s: len %d bytes\n", __func__, len);
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - ifc_nand_ctrl->index) {
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %u available)\n",
+ __func__, len, bufsize - ifc_nand_ctrl->index);
+ len = bufsize - ifc_nand_ctrl->index;
+ }
+
+ memcpy_toio(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index], buf, len);
+ ifc_nand_ctrl->index += len;
+}
+
+/*
+ * Read a byte from the IFC hardware buffer;
+ * read function for 8-bit buswidth
+ */
+static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+
+ /*
+ * If there are still bytes in the IFC buffer, then use the
+ * next byte.
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes)
+ return in_8(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index++]);
+
+ dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read two bytes from the IFC hardware buffer;
+ * read function for 16-bit buswidth
+ */
+static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ uint16_t data;
+
+ /*
+ * If there are still bytes in the IFC buffer, then use the
+ * next byte.
+ */
+ if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+ data = in_be16((uint16_t *)&ifc_nand_ctrl->
+ addr[ifc_nand_ctrl->index]);
+ ifc_nand_ctrl->index += 2;
+ return (uint8_t) data;
+ }
+
+ dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the IFC Controller Data Buffer
+ */
+static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ int avail;
+
+ if (len < 0) {
+ dev_err(priv->dev, "%s: len %d bytes\n", __func__, len);
+ return;
+ }
+
+ avail = min((unsigned int)len,
+ ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
+ memcpy_fromio(buf, &ifc_nand_ctrl->addr[ifc_nand_ctrl->index], avail);
+ ifc_nand_ctrl->index += avail;
+
+ if (len > avail)
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %d available)\n",
+ __func__, len, avail);
+}
+
+/*
+ * Verify buffer against the IFC Controller Data Buffer
+ */
+static int fsl_ifc_verify_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+ int i;
+
+ if (len < 0) {
+ dev_err(priv->dev, "%s: len %d bytes\n", __func__, len);
+ return -EINVAL;
+ }
+
+ if ((unsigned int)len > nctrl->read_bytes - nctrl->index) {
+ dev_err(priv->dev,
+ "%s: beyond end of buffer (%d requested, %u available)\n",
+ __func__, len, nctrl->read_bytes - nctrl->index);
+
+ nctrl->index = nctrl->read_bytes;
+ return -EINVAL;
+ }
+
+ for (i = 0; i < len; i++)
+ if (in_8(&nctrl->addr[nctrl->index + i]) != buf[i])
+ break;
+
+ nctrl->index += len;
+
+ if (i != len)
+ return -EIO;
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ u32 nand_fsr;
+
+ /* Use READ_STATUS command, but wait for the device to be ready */
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fcr0, NAND_CMD_STATUS <<
+ IFC_NAND_FCR0_CMD0_SHIFT);
+ out_be32(&ifc->ifc_nand.nand_fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ ifc_nand_ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+ nand_fsr = in_be32(&ifc->ifc_nand.nand_fsr);
+
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ return nand_fsr | NAND_STATUS_WP;
+}
+
+static int fsl_ifc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct fsl_ifc_mtd *priv = chip->priv;
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+
+ fsl_ifc_read_buf(mtd, buf, mtd->writesize);
+ fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
+ dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ mtd->ecc_stats.failed++;
+
+ return 0;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static void fsl_ifc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ fsl_ifc_write_buf(mtd, buf, mtd->writesize);
+ fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
+
+static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_ifc_mtd *priv = chip->priv;
+
+ dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
+ chip->numchips);
+ dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
+ chip->chipsize);
+ dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
+ chip->pagemask);
+ dev_dbg(priv->dev, "%s: nand->chip_delay = %d\n", __func__,
+ chip->chip_delay);
+ dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
+ chip->badblockpos);
+ dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
+ chip->chip_shift);
+ dev_dbg(priv->dev, "%s: nand->page_shift = %d\n", __func__,
+ chip->page_shift);
+ dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
+ chip->phys_erase_shift);
+ dev_dbg(priv->dev, "%s: nand->ecclayout = %p\n", __func__,
+ chip->ecclayout);
+ dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
+ chip->ecc.mode);
+ dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
+ chip->ecc.steps);
+ dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
+ chip->ecc.bytes);
+ dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
+ chip->ecc.total);
+ dev_dbg(priv->dev, "%s: nand->ecc.layout = %p\n", __func__,
+ chip->ecc.layout);
+ dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
+ dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
+ dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
+ mtd->erasesize);
+ dev_dbg(priv->dev, "%s: mtd->writesize = %d\n", __func__,
+ mtd->writesize);
+ dev_dbg(priv->dev, "%s: mtd->oobsize = %d\n", __func__,
+ mtd->oobsize);
+
+ return 0;
+}
+
+static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
+{
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct nand_chip *chip = &priv->chip;
+ struct nand_ecclayout *layout;
+ u32 csor;
+
+ /* Fill in fsl_ifc_mtd structure */
+ priv->mtd.priv = chip;
+ priv->mtd.owner = THIS_MODULE;
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+ if ((in_be32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+ chip->read_byte = fsl_ifc_read_byte16;
+ else
+ chip->read_byte = fsl_ifc_read_byte;
+
+ chip->write_buf = fsl_ifc_write_buf;
+ chip->read_buf = fsl_ifc_read_buf;
+ chip->verify_buf = fsl_ifc_verify_buf;
+ chip->select_chip = fsl_ifc_select_chip;
+ chip->cmdfunc = fsl_ifc_cmdfunc;
+ chip->waitfunc = fsl_ifc_wait;
+
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+
+ out_be32(&ifc->ifc_nand.ncfgr, 0x0);
+
+ /* set up nand options */
+ chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+
+
+ if (in_be32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+ chip->read_byte = fsl_ifc_read_byte16;
+ chip->options |= NAND_BUSWIDTH_16;
+ } else {
+ chip->read_byte = fsl_ifc_read_byte;
+ }
+
+ chip->controller = &ifc_nand_ctrl->controller;
+ chip->priv = priv;
+
+ chip->ecc.read_page = fsl_ifc_read_page;
+ chip->ecc.write_page = fsl_ifc_write_page;
+
+ csor = in_be32(&ifc->csor_cs[priv->bank].csor);
+
+ /* Hardware generates ECC per 512 Bytes */
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 8;
+
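+ /*
+ * With 512-byte steps and 8 ECC bytes per step, a 2048-byte page needs
+ * 4 * 8 = 32 ECC bytes (oob_2048_ecc4) and a 4096-byte page needs
+ * 8 * 8 = 64 (oob_4096_ecc4); the 8-bit ECC mode selected below doubles
+ * ecc.bytes to 16, giving the 128 bytes of oob_4096_ecc8.
+ */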
+ switch (csor & CSOR_NAND_PGS_MASK) {
+ case CSOR_NAND_PGS_512:
+ if (chip->options & NAND_BUSWIDTH_16) {
+ layout = &oob_512_16bit_ecc4;
+ } else {
+ layout = &oob_512_8bit_ecc4;
+
+ /* Avoid conflict with bad block marker */
+ bbt_main_descr.offs = 0;
+ bbt_mirror_descr.offs = 0;
+ }
+
+ priv->bufnum_mask = 15;
+ break;
+
+ case CSOR_NAND_PGS_2K:
+ layout = &oob_2048_ecc4;
+ priv->bufnum_mask = 3;
+ break;
+
+ case CSOR_NAND_PGS_4K:
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
+ CSOR_NAND_ECC_MODE_4) {
+ layout = &oob_4096_ecc4;
+ } else {
+ layout = &oob_4096_ecc8;
+ chip->ecc.bytes = 16;
+ }
+
+ priv->bufnum_mask = 1;
+ break;
+
+ default:
+ dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
+ return -ENODEV;
+ }
+
+ /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
+ if (csor & CSOR_NAND_ECC_DEC_EN) {
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.layout = layout;
+ } else {
+ chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ return 0;
+}
+
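+/* Tear down one chip select: unregister the MTD device, unmap the chip
+ * window and detach the bank from the shared controller state.
+ */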
+static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
+{
+ nand_release(&priv->mtd);
+
+ kfree(priv->mtd.name);
+
+ if (priv->vbase)
+ iounmap(priv->vbase);
+
+ ifc_nand_ctrl->chips[priv->bank] = NULL;
+ dev_set_drvdata(priv->dev, NULL);
+
+ return 0;
+}
+
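+/* Check whether IFC chip select 'bank' is valid, programmed for the NAND
+ * machine and mapped at the given physical address.
+ */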
+static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
+ phys_addr_t addr)
+{
+ u32 cspr = in_be32(&ifc->cspr_cs[bank].cspr);
+
+ if (!(cspr & CSPR_V))
+ return 0;
+ if ((cspr & CSPR_MSEL) != CSPR_MSEL_NAND)
+ return 0;
+
+ return (cspr & CSPR_BA) == convert_ifc_address(addr);
+}
+
+static DEFINE_MUTEX(fsl_ifc_nand_mutex);
+
+static int __devinit fsl_ifc_nand_probe(struct platform_device *dev)
+{
+ struct fsl_ifc_regs __iomem *ifc;
+ struct fsl_ifc_mtd *priv;
+ struct resource res;
+ static const char *part_probe_types[]
+ = { "cmdlinepart", "RedBoot", "ofpart", NULL };
+ int ret;
+ int bank;
+ struct device_node *node = dev->dev.of_node;
+ struct mtd_part_parser_data ppdata;
+
+ ppdata.of_node = dev->dev.of_node;
+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
+ return -ENODEV;
+ ifc = fsl_ifc_ctrl_dev->regs;
+
+ /* get, allocate and map the memory resource */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(&dev->dev, "%s: failed to get resource\n", __func__);
+ return ret;
+ }
+
+ /* find which chip select it is connected to */
+ for (bank = 0; bank < FSL_IFC_BANK_COUNT; bank++) {
+ if (match_bank(ifc, bank, res.start))
+ break;
+ }
+
+ if (bank >= FSL_IFC_BANK_COUNT) {
+ dev_err(&dev->dev, "%s: address did not match any chip selects\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
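+ /* The NAND controller state is shared by all chip selects; allocate
+ * it on the first probe only. */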
+ mutex_lock(&fsl_ifc_nand_mutex);
+ if (!fsl_ifc_ctrl_dev->nand) {
+ ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
+ if (!ifc_nand_ctrl) {
+ dev_err(&dev->dev, "failed to allocate memory\n");
+ mutex_unlock(&fsl_ifc_nand_mutex);
+ return -ENOMEM;
+ }
+
+ ifc_nand_ctrl->read_bytes = 0;
+ ifc_nand_ctrl->index = 0;
+ ifc_nand_ctrl->addr = NULL;
+ fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
+
+ spin_lock_init(&ifc_nand_ctrl->controller.lock);
+ init_waitqueue_head(&ifc_nand_ctrl->controller.wq);
+ } else {
+ ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
+ }
+ ifc_nand_ctrl->counter++;
+ mutex_unlock(&fsl_ifc_nand_mutex);
+
+ ifc_nand_ctrl->chips[bank] = priv;
+ priv->bank = bank;
+ priv->ctrl = fsl_ifc_ctrl_dev;
+ priv->dev = &dev->dev;
+
+ priv->vbase = ioremap(res.start, resource_size(&res));
+ if (!priv->vbase) {
+ dev_err(priv->dev, "%s: failed to map chip region\n", __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev_set_drvdata(priv->dev, priv);
+
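+ /* enable NAND Machine event detection: op complete, timeout and
+ * write protect errors */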
+ out_be32(&ifc->ifc_nand.nand_evter_en,
+ IFC_NAND_EVTER_EN_OPC_EN |
+ IFC_NAND_EVTER_EN_FTOER_EN |
+ IFC_NAND_EVTER_EN_WPER_EN);
+
+ /* enable NAND Machine Interrupts */
+ out_be32(&ifc->ifc_nand.nand_evter_intr_en,
+ IFC_NAND_EVTER_INTR_OPCIR_EN |
+ IFC_NAND_EVTER_INTR_FTOERIR_EN |
+ IFC_NAND_EVTER_INTR_WPERIR_EN);
+
+ priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
+ if (!priv->mtd.name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
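+ /* Program the chip structure, identify the device, then let the MTD
+ * core finish the scan before registering partitions. */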
+ ret = fsl_ifc_chip_init(priv);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_ident(&priv->mtd, 1, NULL);
+ if (ret)
+ goto err;
+
+ ret = fsl_ifc_chip_init_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+ /* First look for a RedBoot table or partitions on the command
+ * line; these take precedence over device tree information */
+ mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata,
+ NULL, 0);
+
+ dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
+ (unsigned long long)res.start, priv->bank);
+ return 0;
+
+err:
+ fsl_ifc_chip_remove(priv);
+ return ret;
+}
+
+static int fsl_ifc_nand_remove(struct platform_device *dev)
+{
+ struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
+
+ fsl_ifc_chip_remove(priv);
+
+ mutex_lock(&fsl_ifc_nand_mutex);
+ ifc_nand_ctrl->counter--;
+ if (!ifc_nand_ctrl->counter) {
+ fsl_ifc_ctrl_dev->nand = NULL;
+ kfree(ifc_nand_ctrl);
+ }
+ mutex_unlock(&fsl_ifc_nand_mutex);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_ifc_nand_match[] = {
+ {
+ .compatible = "fsl,ifc-nand",
+ },
+ {}
+};
+
+static struct platform_driver fsl_ifc_nand_driver = {
+ .driver = {
+ .name = "fsl,ifc-nand",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_ifc_nand_match,
+ },
+ .probe = fsl_ifc_nand_probe,
+ .remove = fsl_ifc_nand_remove,
+};
+
+static int __init fsl_ifc_nand_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&fsl_ifc_nand_driver);
+ if (ret)
+ printk(KERN_ERR "fsl-ifc: Failed to register platform"
+ "driver\n");
+
+ return ret;
+}
+
+static void __exit fsl_ifc_nand_exit(void)
+{
+ platform_driver_unregister(&fsl_ifc_nand_driver);
+}
+
+module_init(fsl_ifc_nand_init);
+module_exit(fsl_ifc_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Integrated Flash Controller MTD NAND driver");
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index d3d18e8..4e89103 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -974,9 +974,8 @@ config SCSI_IPS
config SCSI_IBMVSCSI
tristate "IBM Virtual SCSI support"
- depends on PPC_PSERIES || PPC_ISERIES
+ depends on PPC_PSERIES
select SCSI_SRP_ATTRS
- select VIOPATH if PPC_ISERIES
help
This is the IBM POWER Virtual SCSI Client
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index a423d96..ff5b5c5 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -1,7 +1,6 @@
obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsic.o
ibmvscsic-y += ibmvscsi.o
-ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o
ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 3d391dc..e984951 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -55,13 +55,7 @@
* and sends a CRQ message back to inform the client that the request has
* completed.
*
- * Note that some of the underlying infrastructure is different between
- * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
- * the older iSeries hypervisor models. To support both, some low level
- * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
- * The Makefile should pick one, not two, not zero, of these.
- *
- * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
+ * TODO: This is currently pretty tied to the IBM pSeries hypervisor
* interfaces. It would be really nice to abstract this above an RDMA
* layer.
*/
@@ -2085,9 +2079,7 @@ int __init ibmvscsi_module_init(void)
driver_template.can_queue = max_requests;
max_events = max_requests + 2;
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- ibmvscsi_ops = &iseriesvscsi_ops;
- else if (firmware_has_feature(FW_FEATURE_VIO))
+ if (firmware_has_feature(FW_FEATURE_VIO))
ibmvscsi_ops = &rpavscsi_ops;
else
return -ENODEV;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 02197a2..c503e17 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -127,7 +127,6 @@ struct ibmvscsi_ops {
int (*resume) (struct ibmvscsi_host_data *hostdata);
};
-extern struct ibmvscsi_ops iseriesvscsi_ops;
extern struct ibmvscsi_ops rpavscsi_ops;
#endif /* IBMVSCSI_H */
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
deleted file mode 100644
index f477645..0000000
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/* ------------------------------------------------------------
- * iSeries_vscsi.c
- * (C) Copyright IBM Corporation 1994, 2003
- * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
- * Santiago Leon (santil@us.ibm.com)
- * Dave Boutcher (sleddog@us.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
- *
- * ------------------------------------------------------------
- * iSeries-specific functions of the SCSI host adapter for Virtual I/O devices
- *
- * This driver allows the Linux SCSI peripheral drivers to directly
- * access devices in the hosting partition, either on an iSeries
- * hypervisor system or a converged hypervisor system.
- */
-
-#include <asm/iseries/vio.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_config.h>
-#include <asm/vio.h>
-#include <linux/device.h>
-#include "ibmvscsi.h"
-
-/* global variables */
-static struct ibmvscsi_host_data *single_host_data;
-
-/* ------------------------------------------------------------
- * Routines for direct interpartition interaction
- */
-struct srp_lp_event {
- struct HvLpEvent lpevt; /* 0x00-0x17 */
- u32 reserved1; /* 0x18-0x1B; unused */
- u16 version; /* 0x1C-0x1D; unused */
- u16 subtype_rc; /* 0x1E-0x1F; unused */
- struct viosrp_crq crq; /* 0x20-0x3F */
-};
-
-/**
- * standard interface for handling logical partition events.
- */
-static void iseriesvscsi_handle_event(struct HvLpEvent *lpevt)
-{
- struct srp_lp_event *evt = (struct srp_lp_event *)lpevt;
-
- if (!evt) {
- printk(KERN_ERR "ibmvscsi: received null event\n");
- return;
- }
-
- if (single_host_data == NULL) {
- printk(KERN_ERR
- "ibmvscsi: received event, no adapter present\n");
- return;
- }
-
- ibmvscsi_handle_crq(&evt->crq, single_host_data);
-}
-
-/* ------------------------------------------------------------
- * Routines for driver initialization
- */
-static int iseriesvscsi_init_crq_queue(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata,
- int max_requests)
-{
- int rc;
-
- single_host_data = hostdata;
- rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, max_requests);
- if (rc < 0) {
- printk("viopath_open failed with rc %d in open_event_path\n",
- rc);
- goto viopath_open_failed;
- }
-
- rc = vio_setHandler(viomajorsubtype_scsi, iseriesvscsi_handle_event);
- if (rc < 0) {
- printk("vio_setHandler failed with rc %d in open_event_path\n",
- rc);
- goto vio_setHandler_failed;
- }
- return 0;
-
- vio_setHandler_failed:
- viopath_close(viopath_hostLp, viomajorsubtype_scsi, max_requests);
- viopath_open_failed:
- return -1;
-}
-
-static void iseriesvscsi_release_crq_queue(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata,
- int max_requests)
-{
- vio_clearHandler(viomajorsubtype_scsi);
- viopath_close(viopath_hostLp, viomajorsubtype_scsi, max_requests);
-}
-
-/**
- * reset_crq_queue: - resets a crq after a failure
- * @queue: crq_queue to initialize and register
- * @hostdata: ibmvscsi_host_data of host
- *
- * no-op for iSeries
- */
-static int iseriesvscsi_reset_crq_queue(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata)
-{
- return 0;
-}
-
-/**
- * reenable_crq_queue: - reenables a crq after a failure
- * @queue: crq_queue to initialize and register
- * @hostdata: ibmvscsi_host_data of host
- *
- * no-op for iSeries
- */
-static int iseriesvscsi_reenable_crq_queue(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata)
-{
- return 0;
-}
-
-/**
- * iseriesvscsi_send_crq: - Send a CRQ
- * @hostdata: the adapter
- * @word1: the first 64 bits of the data
- * @word2: the second 64 bits of the data
- */
-static int iseriesvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
- u64 word1, u64 word2)
-{
- single_host_data = hostdata;
- return HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_scsi,
- HvLpEvent_AckInd_NoAck,
- HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- 0,
- VIOVERSION << 16, word1, word2, 0,
- 0);
-}
-
-static int iseriesvscsi_resume(struct ibmvscsi_host_data *hostdata)
-{
- return 0;
-}
-
-struct ibmvscsi_ops iseriesvscsi_ops = {
- .init_crq_queue = iseriesvscsi_init_crq_queue,
- .release_crq_queue = iseriesvscsi_release_crq_queue,
- .reset_crq_queue = iseriesvscsi_reset_crq_queue,
- .reenable_crq_queue = iseriesvscsi_reenable_crq_queue,
- .send_crq = iseriesvscsi_send_crq,
- .resume = iseriesvscsi_resume,
-};
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 4222035..48cb8d3 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -24,16 +24,6 @@ config HVC_OLD_HVSI
depends on HVC_CONSOLE
default n
-config HVC_ISERIES
- bool "iSeries Hypervisor Virtual Console support"
- depends on PPC_ISERIES
- default y
- select HVC_DRIVER
- select HVC_IRQ
- select VIOPATH
- help
- iSeries machines support a hypervisor virtual console.
-
config HVC_OPAL
bool "OPAL Console support"
depends on PPC_POWERNV
@@ -81,6 +71,10 @@ config HVC_UDBG
depends on PPC && EXPERIMENTAL
select HVC_DRIVER
default n
+ help
+ This is meant to be used during HW bring up or debugging when
+ no other console mechanism exist but udbg, to get you a quick
+ console for userspace. Do NOT enable in production kernels.
config HVC_DCC
bool "ARM JTAG DCC console"
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile
index 89abf40b..4ca3723 100644
--- a/drivers/tty/hvc/Makefile
+++ b/drivers/tty/hvc/Makefile
@@ -1,7 +1,6 @@
obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi_lib.o
obj-$(CONFIG_HVC_OPAL) += hvc_opal.o hvsi_lib.o
obj-$(CONFIG_HVC_OLD_HVSI) += hvsi.o
-obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
obj-$(CONFIG_HVC_TILE) += hvc_tile.o
obj-$(CONFIG_HVC_DCC) += hvc_dcc.o
diff --git a/drivers/tty/hvc/hvc_iseries.c b/drivers/tty/hvc/hvc_iseries.c
deleted file mode 100644
index 3f4a897..0000000
--- a/drivers/tty/hvc/hvc_iseries.c
+++ /dev/null
@@ -1,599 +0,0 @@
-/*
- * iSeries vio driver interface to hvc_console.c
- *
- * This code is based heavily on hvc_vio.c and viocons.c
- *
- * Copyright (C) 2006 Stephen Rothwell, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <stdarg.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/console.h>
-
-#include <asm/hvconsole.h>
-#include <asm/vio.h>
-#include <asm/prom.h>
-#include <asm/firmware.h>
-#include <asm/iseries/vio.h>
-#include <asm/iseries/hv_call.h>
-#include <asm/iseries/hv_lp_config.h>
-#include <asm/iseries/hv_lp_event.h>
-
-#include "hvc_console.h"
-
-#define VTTY_PORTS 10
-
-static DEFINE_SPINLOCK(consolelock);
-static DEFINE_SPINLOCK(consoleloglock);
-
-static const char hvc_driver_name[] = "hvc_console";
-
-#define IN_BUF_SIZE 200
-
-/*
- * Our port information.
- */
-static struct port_info {
- HvLpIndex lp;
- u64 seq; /* sequence number of last HV send */
- u64 ack; /* last ack from HV */
- struct hvc_struct *hp;
- int in_start;
- int in_end;
- unsigned char in_buf[IN_BUF_SIZE];
-} port_info[VTTY_PORTS] = {
- [ 0 ... VTTY_PORTS - 1 ] = {
- .lp = HvLpIndexInvalid
- }
-};
-
-#define viochar_is_console(pi) ((pi) == &port_info[0])
-
-static struct vio_device_id hvc_driver_table[] __devinitdata = {
- {"serial", "IBM,iSeries-vty"},
- { "", "" }
-};
-MODULE_DEVICE_TABLE(vio, hvc_driver_table);
-
-static void hvlog(char *fmt, ...)
-{
- int i;
- unsigned long flags;
- va_list args;
- static char buf[256];
-
- spin_lock_irqsave(&consoleloglock, flags);
- va_start(args, fmt);
- i = vscnprintf(buf, sizeof(buf) - 1, fmt, args);
- va_end(args);
- buf[i++] = '\r';
- HvCall_writeLogBuffer(buf, i);
- spin_unlock_irqrestore(&consoleloglock, flags);
-}
-
-/*
- * Initialize the common fields in a charLpEvent
- */
-static void init_data_event(struct viocharlpevent *viochar, HvLpIndex lp)
-{
- struct HvLpEvent *hev = &viochar->event;
-
- memset(viochar, 0, sizeof(struct viocharlpevent));
-
- hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DEFERRED_ACK |
- HV_LP_EVENT_INT;
- hev->xType = HvLpEvent_Type_VirtualIo;
- hev->xSubtype = viomajorsubtype_chario | viochardata;
- hev->xSourceLp = HvLpConfig_getLpIndex();
- hev->xTargetLp = lp;
- hev->xSizeMinus1 = sizeof(struct viocharlpevent);
- hev->xSourceInstanceId = viopath_sourceinst(lp);
- hev->xTargetInstanceId = viopath_targetinst(lp);
-}
-
-static int get_chars(uint32_t vtermno, char *buf, int count)
-{
- struct port_info *pi;
- int n = 0;
- unsigned long flags;
-
- if (vtermno >= VTTY_PORTS)
- return -EINVAL;
- if (count == 0)
- return 0;
-
- pi = &port_info[vtermno];
- spin_lock_irqsave(&consolelock, flags);
-
- if (pi->in_end == 0)
- goto done;
-
- n = pi->in_end - pi->in_start;
- if (n > count)
- n = count;
- memcpy(buf, &pi->in_buf[pi->in_start], n);
- pi->in_start += n;
- if (pi->in_start == pi->in_end) {
- pi->in_start = 0;
- pi->in_end = 0;
- }
-done:
- spin_unlock_irqrestore(&consolelock, flags);
- return n;
-}
-
-static int put_chars(uint32_t vtermno, const char *buf, int count)
-{
- struct viocharlpevent *viochar;
- struct port_info *pi;
- HvLpEvent_Rc hvrc;
- unsigned long flags;
- int sent = 0;
-
- if (vtermno >= VTTY_PORTS)
- return -EINVAL;
-
- pi = &port_info[vtermno];
-
- spin_lock_irqsave(&consolelock, flags);
-
- if (viochar_is_console(pi) && !viopath_isactive(pi->lp)) {
- HvCall_writeLogBuffer(buf, count);
- sent = count;
- goto done;
- }
-
- viochar = vio_get_event_buffer(viomajorsubtype_chario);
- if (viochar == NULL) {
- hvlog("\n\rviocons: Can't get viochar buffer.");
- goto done;
- }
-
- while ((count > 0) && ((pi->seq - pi->ack) < VIOCHAR_WINDOW)) {
- int len;
-
- len = (count > VIOCHAR_MAX_DATA) ? VIOCHAR_MAX_DATA : count;
-
- if (viochar_is_console(pi))
- HvCall_writeLogBuffer(buf, len);
-
- init_data_event(viochar, pi->lp);
-
- viochar->len = len;
- viochar->event.xCorrelationToken = pi->seq++;
- viochar->event.xSizeMinus1 =
- offsetof(struct viocharlpevent, data) + len;
-
- memcpy(viochar->data, buf, len);
-
- hvrc = HvCallEvent_signalLpEvent(&viochar->event);
- if (hvrc)
- hvlog("\n\rerror sending event! return code %d\n\r",
- (int)hvrc);
- sent += len;
- count -= len;
- buf += len;
- }
-
- vio_free_event_buffer(viomajorsubtype_chario, viochar);
-done:
- spin_unlock_irqrestore(&consolelock, flags);
- return sent;
-}
-
-static const struct hv_ops hvc_get_put_ops = {
- .get_chars = get_chars,
- .put_chars = put_chars,
- .notifier_add = notifier_add_irq,
- .notifier_del = notifier_del_irq,
- .notifier_hangup = notifier_hangup_irq,
-};
-
-static int __devinit hvc_vio_probe(struct vio_dev *vdev,
- const struct vio_device_id *id)
-{
- struct hvc_struct *hp;
- struct port_info *pi;
-
- /* probed with invalid parameters. */
- if (!vdev || !id)
- return -EPERM;
-
- if (vdev->unit_address >= VTTY_PORTS)
- return -ENODEV;
-
- pi = &port_info[vdev->unit_address];
-
- hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops,
- VIOCHAR_MAX_DATA);
- if (IS_ERR(hp))
- return PTR_ERR(hp);
- pi->hp = hp;
- dev_set_drvdata(&vdev->dev, pi);
-
- return 0;
-}
-
-static int __devexit hvc_vio_remove(struct vio_dev *vdev)
-{
- struct port_info *pi = dev_get_drvdata(&vdev->dev);
- struct hvc_struct *hp = pi->hp;
-
- return hvc_remove(hp);
-}
-
-static struct vio_driver hvc_vio_driver = {
- .id_table = hvc_driver_table,
- .probe = hvc_vio_probe,
- .remove = __devexit_p(hvc_vio_remove),
- .driver = {
- .name = hvc_driver_name,
- .owner = THIS_MODULE,
- }
-};
-
-static void hvc_open_event(struct HvLpEvent *event)
-{
- unsigned long flags;
- struct viocharlpevent *cevent = (struct viocharlpevent *)event;
- u8 port = cevent->virtual_device;
- struct port_info *pi;
- int reject = 0;
-
- if (hvlpevent_is_ack(event)) {
- if (port >= VTTY_PORTS)
- return;
-
- spin_lock_irqsave(&consolelock, flags);
-
- pi = &port_info[port];
- if (event->xRc == HvLpEvent_Rc_Good) {
- pi->seq = pi->ack = 0;
- /*
- * This line allows connections from the primary
- * partition but once one is connected from the
- * primary partition nothing short of a reboot
- * of linux will allow access from the hosting
- * partition again without a required iSeries fix.
- */
- pi->lp = event->xTargetLp;
- }
-
- spin_unlock_irqrestore(&consolelock, flags);
- if (event->xRc != HvLpEvent_Rc_Good)
- printk(KERN_WARNING
- "hvc: handle_open_event: event->xRc == (%d).\n",
- event->xRc);
-
- if (event->xCorrelationToken != 0) {
- atomic_t *aptr= (atomic_t *)event->xCorrelationToken;
- atomic_set(aptr, 1);
- } else
- printk(KERN_WARNING
- "hvc: weird...got open ack without atomic\n");
- return;
- }
-
- /* This had better require an ack, otherwise complain */
- if (!hvlpevent_need_ack(event)) {
- printk(KERN_WARNING "hvc: viocharopen without ack bit!\n");
- return;
- }
-
- spin_lock_irqsave(&consolelock, flags);
-
- /* Make sure this is a good virtual tty */
- if (port >= VTTY_PORTS) {
- event->xRc = HvLpEvent_Rc_SubtypeError;
- cevent->subtype_result_code = viorc_openRejected;
- /*
- * Flag state here since we can't printk while holding
- * the consolelock spinlock.
- */
- reject = 1;
- } else {
- pi = &port_info[port];
- if ((pi->lp != HvLpIndexInvalid) &&
- (pi->lp != event->xSourceLp)) {
- /*
- * If this is tty is already connected to a different
- * partition, fail.
- */
- event->xRc = HvLpEvent_Rc_SubtypeError;
- cevent->subtype_result_code = viorc_openRejected;
- reject = 2;
- } else {
- pi->lp = event->xSourceLp;
- event->xRc = HvLpEvent_Rc_Good;
- cevent->subtype_result_code = viorc_good;
- pi->seq = pi->ack = 0;
- }
- }
-
- spin_unlock_irqrestore(&consolelock, flags);
-
- if (reject == 1)
- printk(KERN_WARNING "hvc: open rejected: bad virtual tty.\n");
- else if (reject == 2)
- printk(KERN_WARNING "hvc: open rejected: console in exclusive "
- "use by another partition.\n");
-
- /* Return the acknowledgement */
- HvCallEvent_ackLpEvent(event);
-}
-
-/*
- * Handle a close charLpEvent. This should ONLY be an Interrupt because the
- * virtual console should never actually issue a close event to the hypervisor
- * because the virtual console never goes away. A close event coming from the
- * hypervisor simply means that there are no client consoles connected to the
- * virtual console.
- */
-static void hvc_close_event(struct HvLpEvent *event)
-{
- unsigned long flags;
- struct viocharlpevent *cevent = (struct viocharlpevent *)event;
- u8 port = cevent->virtual_device;
-
- if (!hvlpevent_is_int(event)) {
- printk(KERN_WARNING
- "hvc: got unexpected close acknowledgement\n");
- return;
- }
-
- if (port >= VTTY_PORTS) {
- printk(KERN_WARNING
- "hvc: close message from invalid virtual device.\n");
- return;
- }
-
- /* For closes, just mark the console partition invalid */
- spin_lock_irqsave(&consolelock, flags);
-
- if (port_info[port].lp == event->xSourceLp)
- port_info[port].lp = HvLpIndexInvalid;
-
- spin_unlock_irqrestore(&consolelock, flags);
-}
-
-static void hvc_data_event(struct HvLpEvent *event)
-{
- unsigned long flags;
- struct viocharlpevent *cevent = (struct viocharlpevent *)event;
- struct port_info *pi;
- int n;
- u8 port = cevent->virtual_device;
-
- if (port >= VTTY_PORTS) {
- printk(KERN_WARNING "hvc: data on invalid virtual device %d\n",
- port);
- return;
- }
- if (cevent->len == 0)
- return;
-
- /*
- * Change 05/01/2003 - Ryan Arnold: If a partition other than
- * the current exclusive partition tries to send us data
- * events then just drop them on the floor because we don't
- * want his stinking data. He isn't authorized to receive
- * data because he wasn't the first one to get the console,
- * therefore he shouldn't be allowed to send data either.
- * This will work without an iSeries fix.
- */
- pi = &port_info[port];
- if (pi->lp != event->xSourceLp)
- return;
-
- spin_lock_irqsave(&consolelock, flags);
-
- n = IN_BUF_SIZE - pi->in_end;
- if (n > cevent->len)
- n = cevent->len;
- if (n > 0) {
- memcpy(&pi->in_buf[pi->in_end], cevent->data, n);
- pi->in_end += n;
- }
- spin_unlock_irqrestore(&consolelock, flags);
- if (n == 0)
- printk(KERN_WARNING "hvc: input buffer overflow\n");
-}
-
-static void hvc_ack_event(struct HvLpEvent *event)
-{
- struct viocharlpevent *cevent = (struct viocharlpevent *)event;
- unsigned long flags;
- u8 port = cevent->virtual_device;
-
- if (port >= VTTY_PORTS) {
- printk(KERN_WARNING "hvc: data on invalid virtual device\n");
- return;
- }
-
- spin_lock_irqsave(&consolelock, flags);
- port_info[port].ack = event->xCorrelationToken;
- spin_unlock_irqrestore(&consolelock, flags);
-}
-
-static void hvc_config_event(struct HvLpEvent *event)
-{
- struct viocharlpevent *cevent = (struct viocharlpevent *)event;
-
- if (cevent->data[0] == 0x01)
- printk(KERN_INFO "hvc: window resized to %d: %d: %d: %d\n",
- cevent->data[1], cevent->data[2],
- cevent->data[3], cevent->data[4]);
- else
- printk(KERN_WARNING "hvc: unknown config event\n");
-}
-
-static void hvc_handle_event(struct HvLpEvent *event)
-{
- int charminor;
-
- if (event == NULL)
- return;
-
- charminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK;
- switch (charminor) {
- case viocharopen:
- hvc_open_event(event);
- break;
- case viocharclose:
- hvc_close_event(event);
- break;
- case viochardata:
- hvc_data_event(event);
- break;
- case viocharack:
- hvc_ack_event(event);
- break;
- case viocharconfig:
- hvc_config_event(event);
- break;
- default:
- if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
- event->xRc = HvLpEvent_Rc_InvalidSubtype;
- HvCallEvent_ackLpEvent(event);
- }
- }
-}
-
-static int __init send_open(HvLpIndex remoteLp, void *sem)
-{
- return HvCallEvent_signalLpEventFast(remoteLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_chario | viocharopen,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(remoteLp),
- viopath_targetinst(remoteLp),
- (u64)(unsigned long)sem, VIOVERSION << 16,
- 0, 0, 0, 0);
-}
-
-static int __init hvc_vio_init(void)
-{
- atomic_t wait_flag;
- int rc;
-
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return -EIO;
-
- /* +2 for fudge */
- rc = viopath_open(HvLpConfig_getPrimaryLpIndex(),
- viomajorsubtype_chario, VIOCHAR_WINDOW + 2);
- if (rc)
- printk(KERN_WARNING "hvc: error opening to primary %d\n", rc);
-
- if (viopath_hostLp == HvLpIndexInvalid)
- vio_set_hostlp();
-
- /*
- * And if the primary is not the same as the hosting LP, open to the
- * hosting lp
- */
- if ((viopath_hostLp != HvLpIndexInvalid) &&
- (viopath_hostLp != HvLpConfig_getPrimaryLpIndex())) {
- printk(KERN_INFO "hvc: open path to hosting (%d)\n",
- viopath_hostLp);
- rc = viopath_open(viopath_hostLp, viomajorsubtype_chario,
- VIOCHAR_WINDOW + 2); /* +2 for fudge */
- if (rc)
- printk(KERN_WARNING
- "error opening to partition %d: %d\n",
- viopath_hostLp, rc);
- }
-
- if (vio_setHandler(viomajorsubtype_chario, hvc_handle_event) < 0)
- printk(KERN_WARNING
- "hvc: error seting handler for console events!\n");
-
- /*
- * First, try to open the console to the hosting lp.
- * Wait on a semaphore for the response.
- */
- atomic_set(&wait_flag, 0);
- if ((viopath_isactive(viopath_hostLp)) &&
- (send_open(viopath_hostLp, &wait_flag) == 0)) {
- printk(KERN_INFO "hvc: hosting partition %d\n", viopath_hostLp);
- while (atomic_read(&wait_flag) == 0)
- mb();
- atomic_set(&wait_flag, 0);
- }
-
- /*
- * If we don't have an active console, try the primary
- */
- if ((!viopath_isactive(port_info[0].lp)) &&
- (viopath_isactive(HvLpConfig_getPrimaryLpIndex())) &&
- (send_open(HvLpConfig_getPrimaryLpIndex(), &wait_flag) == 0)) {
- printk(KERN_INFO "hvc: opening console to primary partition\n");
- while (atomic_read(&wait_flag) == 0)
- mb();
- }
-
- /* Register as a vio device to receive callbacks */
- rc = vio_register_driver(&hvc_vio_driver);
-
- return rc;
-}
-module_init(hvc_vio_init); /* after drivers/char/hvc_console.c */
-
-static void __exit hvc_vio_exit(void)
-{
- vio_unregister_driver(&hvc_vio_driver);
-}
-module_exit(hvc_vio_exit);
-
-/* the device tree order defines our numbering */
-static int __init hvc_find_vtys(void)
-{
- struct device_node *vty;
- int num_found = 0;
-
- for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL;
- vty = of_find_node_by_name(vty, "vty")) {
- const uint32_t *vtermno;
-
- /* We have statically defined space for only a certain number
- * of console adapters.
- */
- if ((num_found >= MAX_NR_HVC_CONSOLES) ||
- (num_found >= VTTY_PORTS)) {
- of_node_put(vty);
- break;
- }
-
- vtermno = of_get_property(vty, "reg", NULL);
- if (!vtermno)
- continue;
-
- if (!of_device_is_compatible(vty, "IBM,iSeries-vty"))
- continue;
-
- if (num_found == 0)
- add_preferred_console("hvc", 0, NULL);
- hvc_instantiate(*vtermno, num_found, &hvc_get_put_ops);
- ++num_found;
- }
-
- return num_found;
-}
-console_initcall(hvc_find_vtys);
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index 4c9b13e..7222827 100644
--- a/drivers/tty/hvc/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -36,7 +36,7 @@ static int hvc_udbg_put(uint32_t vtermno, const char *buf, int count)
{
int i;
- for (i = 0; i < count; i++)
+ for (i = 0; i < count && udbg_putc; i++)
udbg_putc(buf[i]);
return i;
@@ -67,6 +67,9 @@ static int __init hvc_udbg_init(void)
{
struct hvc_struct *hp;
+ if (!udbg_putc)
+ return -ENODEV;
+
BUG_ON(hvc_udbg_dev);
hp = hvc_alloc(0, 0, &hvc_udbg_ops, 16);
@@ -88,6 +91,9 @@ module_exit(hvc_udbg_exit);
static int __init hvc_udbg_console_init(void)
{
+ if (!udbg_putc)
+ return -ENODEV;
+
hvc_instantiate(0, 0, &hvc_udbg_ops);
add_preferred_console("hvc", 0, NULL);
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index fc3c3ad..3a0d53d 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -46,7 +46,6 @@
#include <asm/hvconsole.h>
#include <asm/vio.h>
#include <asm/prom.h>
-#include <asm/firmware.h>
#include <asm/hvsi.h>
#include <asm/udbg.h>
@@ -322,9 +321,6 @@ static int __init hvc_vio_init(void)
{
int rc;
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return -EIO;
-
/* Register as a vio device to receive callbacks */
rc = vio_register_driver(&hvc_vio_driver);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 76e7764..665beb6 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -853,7 +853,7 @@ config SERIAL_MPC52xx_CONSOLE_BAUD
config SERIAL_ICOM
tristate "IBM Multiport Serial Adapter"
- depends on PCI && (PPC_ISERIES || PPC_PSERIES)
+ depends on PCI && PPC_PSERIES
select SERIAL_CORE
select FW_LOADER
help
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index df9e8f0..7e9e8f4 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1039,7 +1039,7 @@ config LANTIQ_WDT
config GEF_WDT
tristate "GE Watchdog Timer"
- depends on GEF_SBC610 || GEF_SBC310 || GEF_PPC9A
+ depends on GE_FPGA
---help---
Watchdog timer found in a number of GE single board computers.