Diffstat (limited to 'drivers/media/platform/soc_camera')
-rw-r--r--  drivers/media/platform/soc_camera/Kconfig                      87
-rw-r--r--  drivers/media/platform/soc_camera/Makefile                     14
-rw-r--r--  drivers/media/platform/soc_camera/atmel-isi.c                1099
-rw-r--r--  drivers/media/platform/soc_camera/mx1_camera.c                889
-rw-r--r--  drivers/media/platform/soc_camera/mx2_camera.c               1863
-rw-r--r--  drivers/media/platform/soc_camera/mx3_camera.c               1290
-rw-r--r--  drivers/media/platform/soc_camera/omap1_camera.c             1723
-rw-r--r--  drivers/media/platform/soc_camera/pxa_camera.c               1852
-rw-r--r--  drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c     2331
-rw-r--r--  drivers/media/platform/soc_camera/sh_mobile_csi2.c            398
-rw-r--r--  drivers/media/platform/soc_camera/soc_camera.c               1605
-rw-r--r--  drivers/media/platform/soc_camera/soc_camera_platform.c       206
-rw-r--r--  drivers/media/platform/soc_camera/soc_mediabus.c              493
13 files changed, 13850 insertions, 0 deletions
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
new file mode 100644
index 0000000..9afe1e7
--- /dev/null
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -0,0 +1,87 @@
+config SOC_CAMERA
+ tristate "SoC camera support"
+ depends on VIDEO_V4L2 && HAS_DMA && I2C
+ select VIDEOBUF_GEN
+ select VIDEOBUF2_CORE
+ help
+ SoC Camera is a common API for cameras that do not connect
+ over a bus like PCI or USB, for example an I2C camera connected
+ directly to the data bus of an SoC.
+
+config SOC_CAMERA_PLATFORM
+ tristate "platform camera support"
+ depends on SOC_CAMERA
+ help
+ This is a generic SoC camera platform driver, useful for testing.
+
+config MX1_VIDEO
+ bool
+
+config VIDEO_MX1
+ tristate "i.MX1/i.MXL CMOS Sensor Interface driver"
+ depends on VIDEO_DEV && ARCH_MX1 && SOC_CAMERA
+ select FIQ
+ select VIDEOBUF_DMA_CONTIG
+ select MX1_VIDEO
+ ---help---
+ This is a v4l2 driver for the i.MX1/i.MXL CMOS Sensor Interface
+
+config MX3_VIDEO
+ bool
+
+config VIDEO_MX3
+ tristate "i.MX3x Camera Sensor Interface driver"
+ depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA
+ select VIDEOBUF2_DMA_CONTIG
+ select MX3_VIDEO
+ ---help---
+ This is a v4l2 driver for the i.MX3x Camera Sensor Interface
+
+config VIDEO_PXA27x
+ tristate "PXA27x Quick Capture Interface driver"
+ depends on VIDEO_DEV && PXA27x && SOC_CAMERA
+ select VIDEOBUF_DMA_SG
+ ---help---
+ This is a v4l2 driver for the PXA27x Quick Capture Interface
+
+config VIDEO_SH_MOBILE_CSI2
+ tristate "SuperH Mobile MIPI CSI-2 Interface driver"
+ depends on VIDEO_DEV && SOC_CAMERA && HAVE_CLK
+ ---help---
+ This is a v4l2 driver for the SuperH MIPI CSI-2 Interface
+
+config VIDEO_SH_MOBILE_CEU
+ tristate "SuperH Mobile CEU Interface driver"
+ depends on VIDEO_DEV && SOC_CAMERA && HAS_DMA && HAVE_CLK
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This is a v4l2 driver for the SuperH Mobile CEU Interface
+
+config VIDEO_OMAP1
+ tristate "OMAP1 Camera Interface driver"
+ depends on VIDEO_DEV && ARCH_OMAP1 && SOC_CAMERA
+ select VIDEOBUF_DMA_CONTIG
+ select VIDEOBUF_DMA_SG
+ ---help---
+ This is a v4l2 driver for the TI OMAP1 camera interface
+
+config VIDEO_MX2_HOSTSUPPORT
+ bool
+
+config VIDEO_MX2
+ tristate "i.MX27/i.MX25 Camera Sensor Interface driver"
+ depends on VIDEO_DEV && SOC_CAMERA && (MACH_MX27 || (ARCH_MX25 && BROKEN))
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEO_MX2_HOSTSUPPORT
+ ---help---
+ This is a v4l2 driver for the i.MX27 and the i.MX25 Camera Sensor
+ Interface
+
+config VIDEO_ATMEL_ISI
+ tristate "ATMEL Image Sensor Interface (ISI) support"
+ depends on VIDEO_DEV && SOC_CAMERA && ARCH_AT91
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This module makes the ATMEL Image Sensor Interface available
+ as a v4l2 device.
+
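For quick reference, enabling the Atmel ISI host through the options above needs only the soc-camera core plus the host symbol; a minimal configuration fragment (assuming the V4L2, I2C and ARCH_AT91 prerequisites are already satisfied elsewhere in the kernel config) could look like this:

CONFIG_SOC_CAMERA=y
CONFIG_SOC_CAMERA_PLATFORM=m
CONFIG_VIDEO_ATMEL_ISI=y

The same pattern applies to the other host symbols (VIDEO_MX1, VIDEO_MX3, VIDEO_PXA27x, and so on), each of which additionally depends on its SoC-specific architecture option.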
diff --git a/drivers/media/platform/soc_camera/Makefile b/drivers/media/platform/soc_camera/Makefile
new file mode 100644
index 0000000..136b7f8
--- /dev/null
+++ b/drivers/media/platform/soc_camera/Makefile
@@ -0,0 +1,14 @@
+obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o
+obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
+
+# soc-camera host drivers have to be linked after camera drivers
+obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
+obj-$(CONFIG_VIDEO_MX1) += mx1_camera.o
+obj-$(CONFIG_VIDEO_MX2) += mx2_camera.o
+obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o
+obj-$(CONFIG_VIDEO_OMAP1) += omap1_camera.o
+obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
+obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
+obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o
+
+ccflags-y += -I$(srctree)/drivers/media/i2c/soc_camera
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
new file mode 100644
index 0000000..6274a91
--- /dev/null
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -0,0 +1,1099 @@
+/*
+ * Copyright (c) 2011 Atmel Corporation
+ * Josh Wu, <josh.wu@atmel.com>
+ *
+ * Based on previous work by Lars Haring, <lars.haring@atmel.com>
+ * and Sedji Gaouaou
+ * Based on the bttv driver for Bt848 with respective copyright holders
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/atmel-isi.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define MAX_BUFFER_NUM 32
+#define MAX_SUPPORT_WIDTH 2048
+#define MAX_SUPPORT_HEIGHT 2048
+#define VID_LIMIT_BYTES (16 * 1024 * 1024)
+#define MIN_FRAME_RATE 15
+#define FRAME_INTERVAL_MILLI_SEC (1000 / MIN_FRAME_RATE)
+
+/* ISI states */
+enum {
+ ISI_STATE_IDLE = 0,
+ ISI_STATE_READY,
+ ISI_STATE_WAIT_SOF,
+};
+
+/* Frame buffer descriptor */
+struct fbd {
+ /* Physical address of the frame buffer */
+ u32 fb_address;
+ /* DMA Control Register (only in HISI2) */
+ u32 dma_ctrl;
+ /* Physical address of the next fbd */
+ u32 next_fbd_address;
+};
+
+static void set_dma_ctrl(struct fbd *fb_desc, u32 ctrl)
+{
+ fb_desc->dma_ctrl = ctrl;
+}
+
+struct isi_dma_desc {
+ struct list_head list;
+ struct fbd *p_fbd;
+ u32 fbd_phys;
+};
+
+/* Frame buffer data */
+struct frame_buffer {
+ struct vb2_buffer vb;
+ struct isi_dma_desc *p_dma_desc;
+ struct list_head list;
+};
+
+struct atmel_isi {
+ /* Protects the access of variables shared with the ISR */
+ spinlock_t lock;
+ void __iomem *regs;
+
+ int sequence;
+ /* State of the ISI module in capturing mode */
+ int state;
+
+ /* Wait queue for waiting for SOF */
+ wait_queue_head_t vsync_wq;
+
+ struct vb2_alloc_ctx *alloc_ctx;
+
+ /* Allocate descriptors for dma buffer use */
+ struct fbd *p_fb_descriptors;
+ u32 fb_descriptors_phys;
+ struct list_head dma_desc_head;
+ struct isi_dma_desc dma_desc[MAX_BUFFER_NUM];
+
+ struct completion complete;
+ /* ISI peripheral clock */
+ struct clk *pclk;
+ /* ISI_MCK, feed to camera sensor to generate pixel clock */
+ struct clk *mck;
+ unsigned int irq;
+
+ struct isi_platform_data *pdata;
+ u16 width_flags; /* max 12 bits */
+
+ struct list_head video_buffer_list;
+ struct frame_buffer *active;
+
+ struct soc_camera_device *icd;
+ struct soc_camera_host soc_host;
+};
+
+static void isi_writel(struct atmel_isi *isi, u32 reg, u32 val)
+{
+ writel(val, isi->regs + reg);
+}
+static u32 isi_readl(struct atmel_isi *isi, u32 reg)
+{
+ return readl(isi->regs + reg);
+}
+
+static int configure_geometry(struct atmel_isi *isi, u32 width,
+ u32 height, enum v4l2_mbus_pixelcode code)
+{
+ u32 cfg2, cr;
+
+ switch (code) {
+ /* YUV, including grey */
+ case V4L2_MBUS_FMT_Y8_1X8:
+ cr = ISI_CFG2_GRAYSCALE;
+ break;
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ cr = ISI_CFG2_YCC_SWAP_MODE_3;
+ break;
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ cr = ISI_CFG2_YCC_SWAP_MODE_2;
+ break;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ cr = ISI_CFG2_YCC_SWAP_MODE_1;
+ break;
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ cr = ISI_CFG2_YCC_SWAP_DEFAULT;
+ break;
+ /* RGB, TODO */
+ default:
+ return -EINVAL;
+ }
+
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+
+ cfg2 = isi_readl(isi, ISI_CFG2);
+ cfg2 |= cr;
+ /* Set width */
+ cfg2 &= ~(ISI_CFG2_IM_HSIZE_MASK);
+ cfg2 |= ((width - 1) << ISI_CFG2_IM_HSIZE_OFFSET) &
+ ISI_CFG2_IM_HSIZE_MASK;
+ /* Set height */
+ cfg2 &= ~(ISI_CFG2_IM_VSIZE_MASK);
+ cfg2 |= ((height - 1) << ISI_CFG2_IM_VSIZE_OFFSET)
+ & ISI_CFG2_IM_VSIZE_MASK;
+ isi_writel(isi, ISI_CFG2, cfg2);
+
+ return 0;
+}
+
+static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
+{
+ if (isi->active) {
+ struct vb2_buffer *vb = &isi->active->vb;
+ struct frame_buffer *buf = isi->active;
+
+ list_del_init(&buf->list);
+ do_gettimeofday(&vb->v4l2_buf.timestamp);
+ vb->v4l2_buf.sequence = isi->sequence++;
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ }
+
+ if (list_empty(&isi->video_buffer_list)) {
+ isi->active = NULL;
+ } else {
+ /* start next dma frame. */
+ isi->active = list_entry(isi->video_buffer_list.next,
+ struct frame_buffer, list);
+ isi_writel(isi, ISI_DMA_C_DSCR,
+ isi->active->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_C_CTRL,
+ ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+ }
+ return IRQ_HANDLED;
+}
+
+/* ISI interrupt service routine */
+static irqreturn_t isi_interrupt(int irq, void *dev_id)
+{
+ struct atmel_isi *isi = dev_id;
+ u32 status, mask, pending;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&isi->lock);
+
+ status = isi_readl(isi, ISI_STATUS);
+ mask = isi_readl(isi, ISI_INTMASK);
+ pending = status & mask;
+
+ if (pending & ISI_CTRL_SRST) {
+ complete(&isi->complete);
+ isi_writel(isi, ISI_INTDIS, ISI_CTRL_SRST);
+ ret = IRQ_HANDLED;
+ } else if (pending & ISI_CTRL_DIS) {
+ complete(&isi->complete);
+ isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS);
+ ret = IRQ_HANDLED;
+ } else {
+ if ((pending & ISI_SR_VSYNC) &&
+ (isi->state == ISI_STATE_IDLE)) {
+ isi->state = ISI_STATE_READY;
+ wake_up_interruptible(&isi->vsync_wq);
+ ret = IRQ_HANDLED;
+ }
+ if (likely(pending & ISI_SR_CXFR_DONE))
+ ret = atmel_isi_handle_streaming(isi);
+ }
+
+ spin_unlock(&isi->lock);
+ return ret;
+}
+
+#define WAIT_ISI_RESET 1
+#define WAIT_ISI_DISABLE 0
+static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
+{
+ unsigned long timeout;
+ /*
+ * The reset or disable will only succeed if we have a
+ * pixel clock from the camera.
+ */
+ init_completion(&isi->complete);
+
+ if (wait_reset) {
+ isi_writel(isi, ISI_INTEN, ISI_CTRL_SRST);
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_SRST);
+ } else {
+ isi_writel(isi, ISI_INTEN, ISI_CTRL_DIS);
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+ }
+
+ timeout = wait_for_completion_timeout(&isi->complete,
+ msecs_to_jiffies(100));
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ Videobuf operations
+ ------------------------------------------------------------------*/
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ unsigned long size;
+ int ret;
+
+ /* Reset ISI */
+ ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
+ if (ret < 0) {
+ dev_err(icd->parent, "Reset ISI timed out\n");
+ return ret;
+ }
+ /* Disable all interrupts */
+ isi_writel(isi, ISI_INTDIS, ~0UL);
+
+ size = icd->sizeimage;
+
+ if (!*nbuffers || *nbuffers > MAX_BUFFER_NUM)
+ *nbuffers = MAX_BUFFER_NUM;
+
+ if (size * *nbuffers > VID_LIMIT_BYTES)
+ *nbuffers = VID_LIMIT_BYTES / size;
+
+ *nplanes = 1;
+ sizes[0] = size;
+ alloc_ctxs[0] = isi->alloc_ctx;
+
+ isi->sequence = 0;
+ isi->active = NULL;
+
+ dev_dbg(icd->parent, "%s, count=%d, size=%ld\n", __func__,
+ *nbuffers, size);
+
+ return 0;
+}
+
+static int buffer_init(struct vb2_buffer *vb)
+{
+ struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+
+ buf->p_dma_desc = NULL;
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ unsigned long size;
+ struct isi_dma_desc *desc;
+
+ size = icd->sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(icd->parent, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(&buf->vb, 0, size);
+
+ if (!buf->p_dma_desc) {
+ if (list_empty(&isi->dma_desc_head)) {
+ dev_err(icd->parent, "Not enough dma descriptors.\n");
+ return -EINVAL;
+ } else {
+ /* Get an available descriptor */
+ desc = list_entry(isi->dma_desc_head.next,
+ struct isi_dma_desc, list);
+ /* Delete the descriptor since now it is used */
+ list_del_init(&desc->list);
+
+ /* Initialize the dma descriptor */
+ desc->p_fbd->fb_address =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+ desc->p_fbd->next_fbd_address = 0;
+ set_dma_ctrl(desc->p_fbd, ISI_DMA_CTRL_WB);
+
+ buf->p_dma_desc = desc;
+ }
+ }
+ return 0;
+}
+
+static void buffer_cleanup(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+
+ /* The descriptor is available again, return it to the head of the list */
+ if (buf->p_dma_desc)
+ list_add(&buf->p_dma_desc->list, &isi->dma_desc_head);
+}
+
+static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer)
+{
+ u32 ctrl, cfg1;
+
+ cfg1 = isi_readl(isi, ISI_CFG1);
+ /* Enable irq: cxfr for the codec path, pxfr for the preview path */
+ isi_writel(isi, ISI_INTEN,
+ ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
+
+ /* Check if already in a frame */
+ if (isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) {
+ dev_err(isi->icd->parent, "Already in frame handling.\n");
+ return;
+ }
+
+ isi_writel(isi, ISI_DMA_C_DSCR, buffer->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+
+ /* Enable linked list */
+ cfg1 |= isi->pdata->frate | ISI_CFG1_DISCR;
+
+ /* Enable codec path and ISI */
+ ctrl = ISI_CTRL_CDC | ISI_CTRL_EN;
+ isi_writel(isi, ISI_CTRL, ctrl);
+ isi_writel(isi, ISI_CFG1, cfg1);
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&isi->lock, flags);
+ list_add_tail(&buf->list, &isi->video_buffer_list);
+
+ if (isi->active == NULL) {
+ isi->active = buf;
+ if (vb2_is_streaming(vb->vb2_queue))
+ start_dma(isi, buf);
+ }
+ spin_unlock_irqrestore(&isi->lock, flags);
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+
+ u32 sr = 0;
+ int ret;
+
+ spin_lock_irq(&isi->lock);
+ isi->state = ISI_STATE_IDLE;
+ /* Clear any pending SOF interrupt */
+ sr = isi_readl(isi, ISI_STATUS);
+ /* Enable VSYNC interrupt for SOF */
+ isi_writel(isi, ISI_INTEN, ISI_SR_VSYNC);
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_EN);
+ spin_unlock_irq(&isi->lock);
+
+ dev_dbg(icd->parent, "Waiting for SOF\n");
+ ret = wait_event_interruptible(isi->vsync_wq,
+ isi->state != ISI_STATE_IDLE);
+ if (ret)
+ goto err;
+
+ if (isi->state != ISI_STATE_READY) {
+ ret = -EIO;
+ goto err;
+ }
+
+ spin_lock_irq(&isi->lock);
+ isi->state = ISI_STATE_WAIT_SOF;
+ isi_writel(isi, ISI_INTDIS, ISI_SR_VSYNC);
+ if (count)
+ start_dma(isi, isi->active);
+ spin_unlock_irq(&isi->lock);
+
+ return 0;
+err:
+ isi->active = NULL;
+ isi->sequence = 0;
+ INIT_LIST_HEAD(&isi->video_buffer_list);
+ return ret;
+}
+
+/* abort streaming and wait for last buffer */
+static int stop_streaming(struct vb2_queue *vq)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct frame_buffer *buf, *node;
+ int ret = 0;
+ unsigned long timeout;
+
+ spin_lock_irq(&isi->lock);
+ isi->active = NULL;
+ /* Release all active buffers */
+ list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irq(&isi->lock);
+
+ timeout = jiffies + msecs_to_jiffies(FRAME_INTERVAL_MILLI_SEC);
+ /* Wait until the end of the current frame. */
+ while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) &&
+ time_before(jiffies, timeout))
+ msleep(1);
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(icd->parent,
+ "Timeout waiting for finishing codec request\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Disable interrupts */
+ isi_writel(isi, ISI_INTDIS,
+ ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
+
+ /* Disable the ISI and wait until it is done */
+ ret = atmel_isi_wait_status(isi, WAIT_ISI_DISABLE);
+ if (ret < 0)
+ dev_err(icd->parent, "Disable ISI timed out\n");
+
+ return ret;
+}
+
+static struct vb2_ops isi_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_init = buffer_init,
+ .buf_prepare = buffer_prepare,
+ .buf_cleanup = buffer_cleanup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = soc_camera_unlock,
+ .wait_finish = soc_camera_lock,
+};
+
+/* ------------------------------------------------------------------
+ SOC camera operations for the device
+ ------------------------------------------------------------------*/
+static int isi_camera_init_videobuf(struct vb2_queue *q,
+ struct soc_camera_device *icd)
+{
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP;
+ q->drv_priv = icd;
+ q->buf_struct_size = sizeof(struct frame_buffer);
+ q->ops = &isi_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+
+ return vb2_queue_init(q);
+}
+
+static int isi_camera_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(icd->parent, "Format %x not found\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ dev_dbg(icd->parent, "Plan to set format %dx%d\n",
+ pix->width, pix->height);
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ if (mf.code != xlate->code)
+ return -EINVAL;
+
+ ret = configure_geometry(isi, pix->width, pix->height, xlate->code);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+ icd->current_fmt = xlate;
+
+ dev_dbg(icd->parent, "Finally set format %dx%d\n",
+ pix->width, pix->height);
+
+ return ret;
+}
+
+static int isi_camera_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ u32 pixfmt = pix->pixelformat;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (pixfmt && !xlate) {
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
+ return -EINVAL;
+ }
+
+ /* limit to Atmel ISI hardware capabilities */
+ if (pix->height > MAX_SUPPORT_HEIGHT)
+ pix->height = MAX_SUPPORT_HEIGHT;
+ if (pix->width > MAX_SUPPORT_WIDTH)
+ pix->width = MAX_SUPPORT_WIDTH;
+
+ /* limit to sensor capabilities */
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->colorspace = mf.colorspace;
+
+ switch (mf.field) {
+ case V4L2_FIELD_ANY:
+ pix->field = V4L2_FIELD_NONE;
+ break;
+ case V4L2_FIELD_NONE:
+ break;
+ default:
+ dev_err(icd->parent, "Field type %d unsupported.\n",
+ mf.field);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct soc_mbus_pixelfmt isi_camera_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "Packed YUV422 16 bit",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+};
+
+/* This will be corrected as we get more formats */
+static bool isi_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+{
+ return fmt->packing == SOC_MBUS_PACKING_NONE ||
+ (fmt->bits_per_sample == 8 &&
+ fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+ (fmt->bits_per_sample > 8 &&
+ fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+}
+
+#define ISI_BUS_PARAM (V4L2_MBUS_MASTER | \
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_HSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_PCLK_SAMPLE_RISING | \
+ V4L2_MBUS_PCLK_SAMPLE_FALLING | \
+ V4L2_MBUS_DATA_ACTIVE_HIGH)
+
+static int isi_camera_try_bus_param(struct soc_camera_device *icd,
+ unsigned char buswidth)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long common_flags;
+ int ret;
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ ISI_BUS_PARAM);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%x\n",
+ cfg.flags, ISI_BUS_PARAM);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ }
+
+ if ((1 << (buswidth - 1)) & isi->width_flags)
+ return 0;
+ return -EINVAL;
+}
+
+
+static int isi_camera_get_formats(struct soc_camera_device *icd,
+ unsigned int idx,
+ struct soc_camera_format_xlate *xlate)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ int formats = 0, ret;
+ /* sensor format */
+ enum v4l2_mbus_pixelcode code;
+ /* soc camera host format */
+ const struct soc_mbus_pixelfmt *fmt;
+
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_err(icd->parent,
+ "Invalid format code #%u: %d\n", idx, code);
+ return 0;
+ }
+
+ /* This also checks support for the requested bits-per-sample */
+ ret = isi_camera_try_bus_param(icd, fmt->bits_per_sample);
+ if (ret < 0) {
+ dev_err(icd->parent,
+ "Fail to try the bus parameters.\n");
+ return 0;
+ }
+
+ switch (code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = &isi_camera_formats[0];
+ xlate->code = code;
+ xlate++;
+ dev_dbg(icd->parent, "Providing format %s using code %d\n",
+ isi_camera_formats[0].name, code);
+ }
+ break;
+ default:
+ if (!isi_camera_packing_supported(fmt))
+ return 0;
+ if (xlate)
+ dev_dbg(icd->parent,
+ "Providing format %s in pass-through mode\n",
+ fmt->name);
+ }
+
+ /* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
+ }
+
+ return formats;
+}
+
+/* Called with .video_lock held */
+static int isi_camera_add_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ int ret;
+
+ if (isi->icd)
+ return -EBUSY;
+
+ ret = clk_enable(isi->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(isi->mck);
+ if (ret) {
+ clk_disable(isi->pclk);
+ return ret;
+ }
+
+ isi->icd = icd;
+ dev_dbg(icd->parent, "Atmel ISI Camera driver attached to camera %d\n",
+ icd->devnum);
+ return 0;
+}
+/* Called with .video_lock held */
+static void isi_camera_remove_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+
+ BUG_ON(icd != isi->icd);
+
+ clk_disable(isi->mck);
+ clk_disable(isi->pclk);
+ isi->icd = NULL;
+
+ dev_dbg(icd->parent, "Atmel ISI Camera driver detached from camera %d\n",
+ icd->devnum);
+}
+
+static unsigned int isi_camera_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ return vb2_poll(&icd->vb2_vidq, file, pt);
+}
+
+static int isi_camera_querycap(struct soc_camera_host *ici,
+ struct v4l2_capability *cap)
+{
+ strcpy(cap->driver, "atmel-isi");
+ strcpy(cap->card, "Atmel Image Sensor Interface");
+ cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING);
+ return 0;
+}
+
+static int isi_camera_set_bus_param(struct soc_camera_device *icd)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long common_flags;
+ int ret;
+ u32 cfg1 = 0;
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ ISI_BUS_PARAM);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%x\n",
+ cfg.flags, ISI_BUS_PARAM);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = ISI_BUS_PARAM;
+ }
+ dev_dbg(icd->parent, "Flags cam: 0x%x host: 0x%x common: 0x%lx\n",
+ cfg.flags, ISI_BUS_PARAM, common_flags);
+
+ /* Make choices, based on platform preferences */
+ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+ if (isi->pdata->hsync_act_low)
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+ if (isi->pdata->vsync_act_low)
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+ (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+ if (isi->pdata->pclk_act_falling)
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+ else
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ }
+
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
+ common_flags, ret);
+ return ret;
+ }
+
+ /* set bus param for ISI */
+ if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ cfg1 |= ISI_CFG1_HSYNC_POL_ACTIVE_LOW;
+ if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cfg1 |= ISI_CFG1_VSYNC_POL_ACTIVE_LOW;
+ if (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cfg1 |= ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING;
+
+ if (isi->pdata->has_emb_sync)
+ cfg1 |= ISI_CFG1_EMB_SYNC;
+ if (isi->pdata->full_mode)
+ cfg1 |= ISI_CFG1_FULL_MODE;
+
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+ isi_writel(isi, ISI_CFG1, cfg1);
+
+ return 0;
+}
+
+static struct soc_camera_host_ops isi_soc_camera_host_ops = {
+ .owner = THIS_MODULE,
+ .add = isi_camera_add_device,
+ .remove = isi_camera_remove_device,
+ .set_fmt = isi_camera_set_fmt,
+ .try_fmt = isi_camera_try_fmt,
+ .get_formats = isi_camera_get_formats,
+ .init_videobuf2 = isi_camera_init_videobuf,
+ .poll = isi_camera_poll,
+ .querycap = isi_camera_querycap,
+ .set_bus_param = isi_camera_set_bus_param,
+};
+
+/* -----------------------------------------------------------------------*/
+static int __devexit atmel_isi_remove(struct platform_device *pdev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct atmel_isi *isi = container_of(soc_host,
+ struct atmel_isi, soc_host);
+
+ free_irq(isi->irq, isi);
+ soc_camera_host_unregister(soc_host);
+ vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct fbd) * MAX_BUFFER_NUM,
+ isi->p_fb_descriptors,
+ isi->fb_descriptors_phys);
+
+ iounmap(isi->regs);
+ clk_unprepare(isi->mck);
+ clk_put(isi->mck);
+ clk_unprepare(isi->pclk);
+ clk_put(isi->pclk);
+ kfree(isi);
+
+ return 0;
+}
+
+static int __devinit atmel_isi_probe(struct platform_device *pdev)
+{
+ int irq;
+ struct atmel_isi *isi;
+ struct clk *pclk;
+ struct resource *regs;
+ int ret, i;
+ struct device *dev = &pdev->dev;
+ struct soc_camera_host *soc_host;
+ struct isi_platform_data *pdata;
+
+ pdata = dev->platform_data;
+ if (!pdata || !pdata->data_width_flags || !pdata->mck_hz) {
+ dev_err(&pdev->dev,
+ "No config available for Atmel ISI\n");
+ return -EINVAL;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
+ pclk = clk_get(&pdev->dev, "isi_clk");
+ if (IS_ERR(pclk))
+ return PTR_ERR(pclk);
+
+ ret = clk_prepare(pclk);
+ if (ret)
+ goto err_clk_prepare_pclk;
+
+ isi = kzalloc(sizeof(struct atmel_isi), GFP_KERNEL);
+ if (!isi) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Can't allocate interface!\n");
+ goto err_alloc_isi;
+ }
+
+ isi->pclk = pclk;
+ isi->pdata = pdata;
+ isi->active = NULL;
+ spin_lock_init(&isi->lock);
+ init_waitqueue_head(&isi->vsync_wq);
+ INIT_LIST_HEAD(&isi->video_buffer_list);
+ INIT_LIST_HEAD(&isi->dma_desc_head);
+
+ /* Get ISI_MCK, provided by programmable clock or external clock */
+ isi->mck = clk_get(dev, "isi_mck");
+ if (IS_ERR(isi->mck)) {
+ dev_err(dev, "Failed to get isi_mck\n");
+ ret = PTR_ERR(isi->mck);
+ goto err_clk_get;
+ }
+
+ ret = clk_prepare(isi->mck);
+ if (ret)
+ goto err_clk_prepare_mck;
+
+ /* Set the ISI_MCK frequency; it should be faster than the pixel clock */
+ ret = clk_set_rate(isi->mck, pdata->mck_hz);
+ if (ret < 0)
+ goto err_set_mck_rate;
+
+ isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct fbd) * MAX_BUFFER_NUM,
+ &isi->fb_descriptors_phys,
+ GFP_KERNEL);
+ if (!isi->p_fb_descriptors) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Can't allocate descriptors!\n");
+ goto err_alloc_descriptors;
+ }
+
+ for (i = 0; i < MAX_BUFFER_NUM; i++) {
+ isi->dma_desc[i].p_fbd = isi->p_fb_descriptors + i;
+ isi->dma_desc[i].fbd_phys = isi->fb_descriptors_phys +
+ i * sizeof(struct fbd);
+ list_add(&isi->dma_desc[i].list, &isi->dma_desc_head);
+ }
+
+ isi->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(isi->alloc_ctx)) {
+ ret = PTR_ERR(isi->alloc_ctx);
+ goto err_alloc_ctx;
+ }
+
+ isi->regs = ioremap(regs->start, resource_size(regs));
+ if (!isi->regs) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ if (pdata->data_width_flags & ISI_DATAWIDTH_8)
+ isi->width_flags = 1 << 7;
+ if (pdata->data_width_flags & ISI_DATAWIDTH_10)
+ isi->width_flags |= 1 << 9;
+
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_req_irq;
+ }
+
+ ret = request_irq(irq, isi_interrupt, 0, "isi", isi);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+ goto err_req_irq;
+ }
+ isi->irq = irq;
+
+ soc_host = &isi->soc_host;
+ soc_host->drv_name = "isi-camera";
+ soc_host->ops = &isi_soc_camera_host_ops;
+ soc_host->priv = isi;
+ soc_host->v4l2_dev.dev = &pdev->dev;
+ soc_host->nr = pdev->id;
+
+ ret = soc_camera_host_register(soc_host);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to register soc camera host\n");
+ goto err_register_soc_camera_host;
+ }
+ return 0;
+
+err_register_soc_camera_host:
+ free_irq(isi->irq, isi);
+err_req_irq:
+ iounmap(isi->regs);
+err_ioremap:
+ vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
+err_alloc_ctx:
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct fbd) * MAX_BUFFER_NUM,
+ isi->p_fb_descriptors,
+ isi->fb_descriptors_phys);
+err_alloc_descriptors:
+err_set_mck_rate:
+ clk_unprepare(isi->mck);
+err_clk_prepare_mck:
+ clk_put(isi->mck);
+err_clk_get:
+ kfree(isi);
+err_alloc_isi:
+ clk_unprepare(pclk);
+err_clk_prepare_pclk:
+ clk_put(pclk);
+
+ return ret;
+}
+
+static struct platform_driver atmel_isi_driver = {
+ .probe = atmel_isi_probe,
+ .remove = __devexit_p(atmel_isi_remove),
+ .driver = {
+ .name = "atmel_isi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init atmel_isi_init_module(void)
+{
+ return platform_driver_probe(&atmel_isi_driver, &atmel_isi_probe);
+}
+
+static void __exit atmel_isi_exit(void)
+{
+ platform_driver_unregister(&atmel_isi_driver);
+}
+module_init(atmel_isi_init_module);
+module_exit(atmel_isi_exit);
+
+MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>");
+MODULE_DESCRIPTION("The V4L2 driver for the Atmel Image Sensor Interface (ISI)");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("video");
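For context on how this host driver gets wired up: the probe function above expects the board code to supply a struct isi_platform_data and the register/IRQ resources through a platform device whose name matches the driver name "atmel_isi", together with "isi_clk"/"isi_mck" clock lookups. Below is a minimal board-file sketch, not part of this patch; the base address and IRQ are placeholder values and ISI_CFG1_FRATE_CAPTURE_ALL is assumed to be provided by <media/atmel-isi.h>.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <media/atmel-isi.h>

/* Board-specific ISI configuration; the field names match what the driver reads. */
static struct isi_platform_data board_isi_data = {
	.frate			= ISI_CFG1_FRATE_CAPTURE_ALL,	/* assumed frame-rate macro */
	.data_width_flags	= ISI_DATAWIDTH_8 | ISI_DATAWIDTH_10,
	.mck_hz			= 25000000,	/* example ISI_MCK rate, board specific */
	.full_mode		= 1,
};

static struct resource board_isi_resources[] = {
	{
		.start	= 0xf0034000,			/* placeholder register base */
		.end	= 0xf0034000 + 0x4000 - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 25,				/* placeholder IRQ number */
		.end	= 25,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_isi_device = {
	.name		= "atmel_isi",		/* must match atmel_isi_driver.driver.name */
	.id		= 0,
	.resource	= board_isi_resources,
	.num_resources	= ARRAY_SIZE(board_isi_resources),
	.dev		= {
		.platform_data	= &board_isi_data,
	},
};

Because the module init uses platform_driver_probe(), the device has to be registered (for example with platform_device_register(&board_isi_device) or an SoC helper) before the driver loads, otherwise probing never runs.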
diff --git a/drivers/media/platform/soc_camera/mx1_camera.c b/drivers/media/platform/soc_camera/mx1_camera.c
new file mode 100644
index 0000000..bbe7099
--- /dev/null
+++ b/drivers/media/platform/soc_camera/mx1_camera.c
@@ -0,0 +1,889 @@
+/*
+ * V4L2 Driver for i.MX1/i.MXL camera (CSI) host
+ *
+ * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Copyright (C) 2009, Darius Augulis <augulis.darius@gmail.com>
+ *
+ * Based on PXA SoC camera driver
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/videodev2.h>
+
+#include <media/soc_camera.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf-dma-contig.h>
+#include <media/soc_mediabus.h>
+
+#include <asm/dma.h>
+#include <asm/fiq.h>
+#include <mach/dma-mx1-mx2.h>
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <linux/platform_data/camera-mx1.h>
+
+/*
+ * CSI registers
+ */
+#define CSICR1 0x00 /* CSI Control Register 1 */
+#define CSISR 0x08 /* CSI Status Register */
+#define CSIRXR 0x10 /* CSI RxFIFO Register */
+
+#define CSICR1_RXFF_LEVEL(x) (((x) & 0x3) << 19)
+#define CSICR1_SOF_POL (1 << 17)
+#define CSICR1_SOF_INTEN (1 << 16)
+#define CSICR1_MCLKDIV(x) (((x) & 0xf) << 12)
+#define CSICR1_MCLKEN (1 << 9)
+#define CSICR1_FCC (1 << 8)
+#define CSICR1_BIG_ENDIAN (1 << 7)
+#define CSICR1_CLR_RXFIFO (1 << 5)
+#define CSICR1_GCLK_MODE (1 << 4)
+#define CSICR1_DATA_POL (1 << 2)
+#define CSICR1_REDGE (1 << 1)
+#define CSICR1_EN (1 << 0)
+
+#define CSISR_SFF_OR_INT (1 << 25)
+#define CSISR_RFF_OR_INT (1 << 24)
+#define CSISR_STATFF_INT (1 << 21)
+#define CSISR_RXFF_INT (1 << 18)
+#define CSISR_SOF_INT (1 << 16)
+#define CSISR_DRDY (1 << 0)
+
+#define DRIVER_VERSION "0.0.2"
+#define DRIVER_NAME "mx1-camera"
+
+#define CSI_IRQ_MASK (CSISR_SFF_OR_INT | CSISR_RFF_OR_INT | \
+ CSISR_STATFF_INT | CSISR_RXFF_INT | CSISR_SOF_INT)
+
+#define CSI_BUS_FLAGS (V4L2_MBUS_MASTER | V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING | \
+ V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_LOW)
+
+#define MAX_VIDEO_MEM 16 /* Video memory limit in megabytes */
+
+/*
+ * Structures
+ */
+
+/* buffer for one video frame */
+struct mx1_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct videobuf_buffer vb;
+ enum v4l2_mbus_pixelcode code;
+ int inwork;
+};
+
+/*
+ * i.MX1/i.MXL is only supposed to handle one camera on its Camera Sensor
+ * Interface. If anyone ever builds hardware to enable more than
+ * one camera, they will have to modify this driver too
+ */
+struct mx1_camera_dev {
+ struct soc_camera_host soc_host;
+ struct soc_camera_device *icd;
+ struct mx1_camera_pdata *pdata;
+ struct mx1_buffer *active;
+ struct resource *res;
+ struct clk *clk;
+ struct list_head capture;
+
+ void __iomem *base;
+ int dma_chan;
+ unsigned int irq;
+ unsigned long mclk;
+
+ spinlock_t lock;
+};
+
+/*
+ * Videobuf operations
+ */
+static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+
+ *size = icd->sizeimage;
+
+ if (!*count)
+ *count = 32;
+
+ if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
+ *count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
+
+ dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
+
+ return 0;
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct mx1_buffer *buf)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ struct videobuf_buffer *vb = &buf->vb;
+
+ BUG_ON(in_interrupt());
+
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ vb, vb->baddr, vb->bsize);
+
+ /*
+ * This waits until this buffer is out of danger, i.e., until it is no
+ * longer in STATE_QUEUED or STATE_ACTIVE
+ */
+ videobuf_waiton(vq, vb, 0, 0);
+ videobuf_dma_contig_free(vq, vb);
+
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static int mx1_videobuf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb, enum v4l2_field field)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
+ int ret;
+
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ vb, vb->baddr, vb->bsize);
+
+ /* Added list head initialization on alloc */
+ WARN_ON(!list_empty(&vb->queue));
+
+ BUG_ON(NULL == icd->current_fmt);
+
+ /*
+ * In buf_prepare you only have to protect global data;
+ * the actual buffer is yours
+ */
+ buf->inwork = 1;
+
+ if (buf->code != icd->current_fmt->code ||
+ vb->width != icd->user_width ||
+ vb->height != icd->user_height ||
+ vb->field != field) {
+ buf->code = icd->current_fmt->code;
+ vb->width = icd->user_width;
+ vb->height = icd->user_height;
+ vb->field = field;
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ }
+
+ vb->size = icd->sizeimage;
+ if (0 != vb->baddr && vb->bsize < vb->size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret)
+ goto fail;
+
+ vb->state = VIDEOBUF_PREPARED;
+ }
+
+ buf->inwork = 0;
+
+ return 0;
+
+fail:
+ free_buffer(vq, buf);
+out:
+ buf->inwork = 0;
+ return ret;
+}
+
+static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev)
+{
+ struct videobuf_buffer *vbuf;
+ struct device *dev = pcdev->icd->parent;
+ int ret;
+
+ if (unlikely(!pcdev->active)) {
+ dev_err(dev, "DMA End IRQ with no active buffer\n");
+ return -EFAULT;
+ }
+
+ vbuf = &pcdev->active->vb;
+
+ /* setup sg list for future DMA */
+ ret = imx_dma_setup_single(pcdev->dma_chan,
+ videobuf_to_dma_contig(vbuf),
+ vbuf->size, pcdev->res->start +
+ CSIRXR, DMA_MODE_READ);
+ if (unlikely(ret))
+ dev_err(dev, "Failed to setup DMA sg list\n");
+
+ return ret;
+}
+
+/* Called under spinlock_irqsave(&pcdev->lock, ...) */
+static void mx1_videobuf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx1_camera_dev *pcdev = ici->priv;
+ struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
+
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ vb, vb->baddr, vb->bsize);
+
+ list_add_tail(&vb->queue, &pcdev->capture);
+
+ vb->state = VIDEOBUF_ACTIVE;
+
+ if (!pcdev->active) {
+ pcdev->active = buf;
+
+ /* setup sg list for future DMA */
+ if (!mx1_camera_setup_dma(pcdev)) {
+ unsigned int temp;
+ /* enable SOF irq */
+ temp = __raw_readl(pcdev->base + CSICR1) |
+ CSICR1_SOF_INTEN;
+ __raw_writel(temp, pcdev->base + CSICR1);
+ }
+ }
+}
+
+static void mx1_videobuf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
+#ifdef DEBUG
+ struct soc_camera_device *icd = vq->priv_data;
+ struct device *dev = icd->parent;
+
+ dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ vb, vb->baddr, vb->bsize);
+
+ switch (vb->state) {
+ case VIDEOBUF_ACTIVE:
+ dev_dbg(dev, "%s (active)\n", __func__);
+ break;
+ case VIDEOBUF_QUEUED:
+ dev_dbg(dev, "%s (queued)\n", __func__);
+ break;
+ case VIDEOBUF_PREPARED:
+ dev_dbg(dev, "%s (prepared)\n", __func__);
+ break;
+ default:
+ dev_dbg(dev, "%s (unknown)\n", __func__);
+ break;
+ }
+#endif
+
+ free_buffer(vq, buf);
+}
+
+static void mx1_camera_wakeup(struct mx1_camera_dev *pcdev,
+ struct videobuf_buffer *vb,
+ struct mx1_buffer *buf)
+{
+ /* _init is used to debug races, see comment in mx1_camera_reqbufs() */
+ list_del_init(&vb->queue);
+ vb->state = VIDEOBUF_DONE;
+ do_gettimeofday(&vb->ts);
+ vb->field_count++;
+ wake_up(&vb->done);
+
+ if (list_empty(&pcdev->capture)) {
+ pcdev->active = NULL;
+ return;
+ }
+
+ pcdev->active = list_entry(pcdev->capture.next,
+ struct mx1_buffer, vb.queue);
+
+ /* setup sg list for future DMA */
+ if (likely(!mx1_camera_setup_dma(pcdev))) {
+ unsigned int temp;
+
+ /* enable SOF irq */
+ temp = __raw_readl(pcdev->base + CSICR1) | CSICR1_SOF_INTEN;
+ __raw_writel(temp, pcdev->base + CSICR1);
+ }
+}
+
+static void mx1_camera_dma_irq(int channel, void *data)
+{
+ struct mx1_camera_dev *pcdev = data;
+ struct device *dev = pcdev->icd->parent;
+ struct mx1_buffer *buf;
+ struct videobuf_buffer *vb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+
+ imx_dma_disable(channel);
+
+ if (unlikely(!pcdev->active)) {
+ dev_err(dev, "DMA End IRQ with no active buffer\n");
+ goto out;
+ }
+
+ vb = &pcdev->active->vb;
+ buf = container_of(vb, struct mx1_buffer, vb);
+ WARN_ON(buf->inwork || list_empty(&vb->queue));
+ dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ vb, vb->baddr, vb->bsize);
+
+ mx1_camera_wakeup(pcdev, vb, buf);
+out:
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static struct videobuf_queue_ops mx1_videobuf_ops = {
+ .buf_setup = mx1_videobuf_setup,
+ .buf_prepare = mx1_videobuf_prepare,
+ .buf_queue = mx1_videobuf_queue,
+ .buf_release = mx1_videobuf_release,
+};
+
+static void mx1_camera_init_videobuf(struct videobuf_queue *q,
+ struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx1_camera_dev *pcdev = ici->priv;
+
+ videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, icd->parent,
+ &pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_NONE,
+ sizeof(struct mx1_buffer), icd, &icd->video_lock);
+}
+
+static int mclk_get_divisor(struct mx1_camera_dev *pcdev)
+{
+ unsigned int mclk = pcdev->mclk;
+ unsigned long div;
+ unsigned long lcdclk;
+
+ lcdclk = clk_get_rate(pcdev->clk);
+
+ /*
+ * We verify platform_mclk_10khz != 0, so if anyone breaks it, here
+ * they get a nice Oops
+ */
+ div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
+
+ dev_dbg(pcdev->icd->parent,
+ "System clock %lukHz, target freq %dkHz, divisor %lu\n",
+ lcdclk / 1000, mclk / 1000, div);
+
+ return div;
+}
+
+static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
+{
+ unsigned int csicr1 = CSICR1_EN;
+
+ dev_dbg(pcdev->icd->parent, "Activate device\n");
+
+ clk_prepare_enable(pcdev->clk);
+
+ /* enable CSI before doing anything else */
+ __raw_writel(csicr1, pcdev->base + CSICR1);
+
+ csicr1 |= CSICR1_MCLKEN | CSICR1_FCC | CSICR1_GCLK_MODE;
+ csicr1 |= CSICR1_MCLKDIV(mclk_get_divisor(pcdev));
+ csicr1 |= CSICR1_RXFF_LEVEL(2); /* 16 words */
+
+ __raw_writel(csicr1, pcdev->base + CSICR1);
+}
+
+static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
+{
+ dev_dbg(pcdev->icd->parent, "Deactivate device\n");
+
+ /* Disable all CSI interface */
+ __raw_writel(0x00, pcdev->base + CSICR1);
+
+ clk_disable_unprepare(pcdev->clk);
+}
+
+/*
+ * The following two functions absolutely depend on the fact, that
+ * there can be only one camera on i.MX1/i.MXL camera sensor interface
+ */
+static int mx1_camera_add_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx1_camera_dev *pcdev = ici->priv;
+
+ if (pcdev->icd)
+ return -EBUSY;
+
+ dev_info(icd->parent, "MX1 Camera driver attached to camera %d\n",
+ icd->devnum);
+
+ mx1_camera_activate(pcdev);
+
+ pcdev->icd = icd;
+
+ return 0;
+}
+
+static void mx1_camera_remove_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx1_camera_dev *pcdev = ici->priv;
+ unsigned int csicr1;
+
+ BUG_ON(icd != pcdev->icd);
+
+ /* disable interrupts */
+ csicr1 = __raw_readl(pcdev->base + CSICR1) & ~CSI_IRQ_MASK;
+ __raw_writel(csicr1, pcdev->base + CSICR1);
+
+ /* Stop DMA engine */
+ imx_dma_disable(pcdev->dma_chan);
+
+ dev_info(icd->parent, "MX1 Camera driver detached from camera %d\n",
+ icd->devnum);
+
+ mx1_camera_deactivate(pcdev);
+
+ pcdev->icd = NULL;
+}
+
+static int mx1_camera_set_crop(struct soc_camera_device *icd,
+ struct v4l2_crop *a)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+ return v4l2_subdev_call(sd, video, s_crop, a);
+}
+
+static int mx1_camera_set_bus_param(struct soc_camera_device *icd)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx1_camera_dev *pcdev = ici->priv;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long common_flags;
+ unsigned int csicr1;
+ int ret;
+
+ /* MX1 supports only 8bit buswidth */
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg, CSI_BUS_FLAGS);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%x\n",
+ cfg.flags, CSI_BUS_FLAGS);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = CSI_BUS_FLAGS;
+ }
+
+ /* Make choices, based on platform preferences */
+ if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+ if (!pcdev->pdata ||
+ pcdev->pdata->flags & MX1_CAMERA_VSYNC_HIGH)
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+ else
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+ }
+
+ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+ (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+ if (!pcdev->pdata ||
+ pcdev->pdata->flags & MX1_CAMERA_PCLK_RISING)
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ else
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+ }
+
+ if ((common_flags & V4L2_MBUS_DATA_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_DATA_ACTIVE_LOW)) {
+ if (!pcdev->pdata ||
+ pcdev->pdata->flags & MX1_CAMERA_DATA_HIGH)
+ common_flags &= ~V4L2_MBUS_DATA_ACTIVE_LOW;
+ else
+ common_flags &= ~V4L2_MBUS_DATA_ACTIVE_HIGH;
+ }
+
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
+ common_flags, ret);
+ return ret;
+ }
+
+ csicr1 = __raw_readl(pcdev->base + CSICR1);
+
+ if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ csicr1 |= CSICR1_REDGE;
+ if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ csicr1 |= CSICR1_SOF_POL;
+ if (common_flags & V4L2_MBUS_DATA_ACTIVE_LOW)
+ csicr1 |= CSICR1_DATA_POL;
+
+ __raw_writel(csicr1, pcdev->base + CSICR1);
+
+ return 0;
+}
+
+static int mx1_camera_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret, buswidth;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(icd->parent, "Format %x not found\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ buswidth = xlate->host_fmt->bits_per_sample;
+ if (buswidth > 8) {
+ dev_warn(icd->parent,
+ "bits-per-sample %d for format %x unsupported\n",
+ buswidth, pix->pixelformat);
+ return -EINVAL;
+ }
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ if (mf.code != xlate->code)
+ return -EINVAL;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+ icd->current_fmt = xlate;
+
+ return ret;
+}
+
+static int mx1_camera_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+ /* TODO: limit to mx1 hardware capabilities */
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(icd->parent, "Format %x not found\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ /* limit to sensor capabilities */
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+
+ return 0;
+}
+
+static int mx1_camera_reqbufs(struct soc_camera_device *icd,
+ struct v4l2_requestbuffers *p)
+{
+ int i;
+
+ /*
+ * This is for locking debugging only. I removed spinlocks and now I
+ * check whether .prepare is ever called on a linked buffer, or whether
+ * a dma IRQ can occur for an in-work or unlinked buffer. Until now
+ * it hadn't triggered
+ */
+ for (i = 0; i < p->count; i++) {
+ struct mx1_buffer *buf = container_of(icd->vb_vidq.bufs[i],
+ struct mx1_buffer, vb);
+ buf->inwork = 0;
+ INIT_LIST_HEAD(&buf->vb.queue);
+ }
+
+ return 0;
+}
+
+static unsigned int mx1_camera_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct mx1_buffer *buf;
+
+ buf = list_entry(icd->vb_vidq.stream.next, struct mx1_buffer,
+ vb.stream);
+
+ poll_wait(file, &buf->vb.done, pt);
+
+ if (buf->vb.state == VIDEOBUF_DONE ||
+ buf->vb.state == VIDEOBUF_ERROR)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static int mx1_camera_querycap(struct soc_camera_host *ici,
+ struct v4l2_capability *cap)
+{
+ /* cap->name is set by the friendly caller:-> */
+ strlcpy(cap->card, "i.MX1/i.MXL Camera", sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+static struct soc_camera_host_ops mx1_soc_camera_host_ops = {
+ .owner = THIS_MODULE,
+ .add = mx1_camera_add_device,
+ .remove = mx1_camera_remove_device,
+ .set_bus_param = mx1_camera_set_bus_param,
+ .set_crop = mx1_camera_set_crop,
+ .set_fmt = mx1_camera_set_fmt,
+ .try_fmt = mx1_camera_try_fmt,
+ .init_videobuf = mx1_camera_init_videobuf,
+ .reqbufs = mx1_camera_reqbufs,
+ .poll = mx1_camera_poll,
+ .querycap = mx1_camera_querycap,
+};
+
+static struct fiq_handler fh = {
+ .name = "csi_sof"
+};
+
+static int __init mx1_camera_probe(struct platform_device *pdev)
+{
+ struct mx1_camera_dev *pcdev;
+ struct resource *res;
+ struct pt_regs regs;
+ struct clk *clk;
+ void __iomem *base;
+ unsigned int irq;
+ int err = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || (int)irq <= 0) {
+ err = -ENODEV;
+ goto exit;
+ }
+
+ clk = clk_get(&pdev->dev, "csi_clk");
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ goto exit;
+ }
+
+ pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
+ if (!pcdev) {
+ dev_err(&pdev->dev, "Could not allocate pcdev\n");
+ err = -ENOMEM;
+ goto exit_put_clk;
+ }
+
+ pcdev->res = res;
+ pcdev->clk = clk;
+
+ pcdev->pdata = pdev->dev.platform_data;
+
+ if (pcdev->pdata)
+ pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
+
+ if (!pcdev->mclk) {
+ dev_warn(&pdev->dev,
+ "mclk_10khz == 0! Please, fix your platform data. "
+ "Using default 20MHz\n");
+ pcdev->mclk = 20000000;
+ }
+
+ INIT_LIST_HEAD(&pcdev->capture);
+ spin_lock_init(&pcdev->lock);
+
+ /*
+ * Request the regions.
+ */
+ if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) {
+ err = -EBUSY;
+ goto exit_kfree;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ err = -ENOMEM;
+ goto exit_release;
+ }
+ pcdev->irq = irq;
+ pcdev->base = base;
+
+ /* request dma */
+ pcdev->dma_chan = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_HIGH);
+ if (pcdev->dma_chan < 0) {
+ dev_err(&pdev->dev, "Can't request DMA for MX1 CSI\n");
+ err = -EBUSY;
+ goto exit_iounmap;
+ }
+ dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chan);
+
+ imx_dma_setup_handlers(pcdev->dma_chan, mx1_camera_dma_irq, NULL,
+ pcdev);
+
+ imx_dma_config_channel(pcdev->dma_chan, IMX_DMA_TYPE_FIFO,
+ IMX_DMA_MEMSIZE_32, MX1_DMA_REQ_CSI_R, 0);
+ /* burst length : 16 words = 64 bytes */
+ imx_dma_config_burstlen(pcdev->dma_chan, 0);
+
+ /* request irq */
+ err = claim_fiq(&fh);
+ if (err) {
+ dev_err(&pdev->dev, "Camera interrupt register failed \n");
+ goto exit_free_dma;
+ }
+
+ set_fiq_handler(&mx1_camera_sof_fiq_start, &mx1_camera_sof_fiq_end -
+ &mx1_camera_sof_fiq_start);
+
+ regs.ARM_r8 = (long)MX1_DMA_DIMR;
+ regs.ARM_r9 = (long)MX1_DMA_CCR(pcdev->dma_chan);
+ regs.ARM_r10 = (long)pcdev->base + CSICR1;
+ regs.ARM_fp = (long)pcdev->base + CSISR;
+ regs.ARM_sp = 1 << pcdev->dma_chan;
+ set_fiq_regs(&regs);
+
+ mxc_set_irq_fiq(irq, 1);
+ enable_fiq(irq);
+
+ pcdev->soc_host.drv_name = DRIVER_NAME;
+ pcdev->soc_host.ops = &mx1_soc_camera_host_ops;
+ pcdev->soc_host.priv = pcdev;
+ pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
+ pcdev->soc_host.nr = pdev->id;
+ err = soc_camera_host_register(&pcdev->soc_host);
+ if (err)
+ goto exit_free_irq;
+
+ dev_info(&pdev->dev, "MX1 Camera driver loaded\n");
+
+ return 0;
+
+exit_free_irq:
+ disable_fiq(irq);
+ mxc_set_irq_fiq(irq, 0);
+ release_fiq(&fh);
+exit_free_dma:
+ imx_dma_free(pcdev->dma_chan);
+exit_iounmap:
+ iounmap(base);
+exit_release:
+ release_mem_region(res->start, resource_size(res));
+exit_kfree:
+ kfree(pcdev);
+exit_put_clk:
+ clk_put(clk);
+exit:
+ return err;
+}
+
+static int __exit mx1_camera_remove(struct platform_device *pdev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct mx1_camera_dev *pcdev = container_of(soc_host,
+ struct mx1_camera_dev, soc_host);
+ struct resource *res;
+
+ imx_dma_free(pcdev->dma_chan);
+ disable_fiq(pcdev->irq);
+ mxc_set_irq_fiq(pcdev->irq, 0);
+ release_fiq(&fh);
+
+ clk_put(pcdev->clk);
+
+ soc_camera_host_unregister(soc_host);
+
+ iounmap(pcdev->base);
+
+ res = pcdev->res;
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(pcdev);
+
+ dev_info(&pdev->dev, "MX1 Camera driver unloaded\n");
+
+ return 0;
+}
+
+static struct platform_driver mx1_camera_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .remove = __exit_p(mx1_camera_remove),
+};
+
+static int __init mx1_camera_init(void)
+{
+ return platform_driver_probe(&mx1_camera_driver, mx1_camera_probe);
+}
+
+static void __exit mx1_camera_exit(void)
+{
+ platform_driver_unregister(&mx1_camera_driver);
+}
+
+module_init(mx1_camera_init);
+module_exit(mx1_camera_exit);
+
+MODULE_DESCRIPTION("i.MX1/i.MXL SoC Camera Host driver");
+MODULE_AUTHOR("Paulius Zaleckas <paulius.zaleckas@teltonika.lt>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_ALIAS("platform:" DRIVER_NAME);
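For reference, the only board-specific input this host driver takes is a struct mx1_camera_pdata describing the sensor master clock and the CSI signal polarities; the register/IRQ resources and the "csi_clk" clock lookup come from the SoC support code. A sketch of the platform data a board file might attach to the "mx1-camera" platform device, with placeholder values:

#include <linux/platform_device.h>
#include <linux/platform_data/camera-mx1.h>

/* Board-specific CSI setup; flag names are the ones tested by the driver above. */
static struct mx1_camera_pdata board_csi_pdata = {
	.mclk_10khz	= 2400,		/* 2400 * 10 kHz = 24 MHz master clock, board specific */
	.flags		= MX1_CAMERA_VSYNC_HIGH | MX1_CAMERA_PCLK_RISING |
			  MX1_CAMERA_DATA_HIGH,
};

mx1_camera_probe() picks this up through pdev->dev.platform_data; if mclk_10khz is left at zero the driver warns and falls back to 20 MHz, as shown above.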
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
new file mode 100644
index 0000000..403d7f1
--- /dev/null
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -0,0 +1,1863 @@
+/*
+ * V4L2 Driver for i.MX27/i.MX25 camera host
+ *
+ * Copyright (C) 2008, Sascha Hauer, Pengutronix
+ * Copyright (C) 2010, Baruch Siach, Orex Computed Radiography
+ * Copyright (C) 2012, Javier Martin, Vista Silicon S.L.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/gcd.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+
+#include <linux/videodev2.h>
+
+#include <linux/platform_data/camera-mx2.h>
+#include <mach/hardware.h>
+
+#include <asm/dma.h>
+
+#define MX2_CAM_DRV_NAME "mx2-camera"
+#define MX2_CAM_VERSION "0.0.6"
+#define MX2_CAM_DRIVER_DESCRIPTION "i.MX2x_Camera"
+
+/* reset values */
+#define CSICR1_RESET_VAL 0x40000800
+#define CSICR2_RESET_VAL 0x0
+#define CSICR3_RESET_VAL 0x0
+
+/* csi control reg 1 */
+#define CSICR1_SWAP16_EN (1 << 31)
+#define CSICR1_EXT_VSYNC (1 << 30)
+#define CSICR1_EOF_INTEN (1 << 29)
+#define CSICR1_PRP_IF_EN (1 << 28)
+#define CSICR1_CCIR_MODE (1 << 27)
+#define CSICR1_COF_INTEN (1 << 26)
+#define CSICR1_SF_OR_INTEN (1 << 25)
+#define CSICR1_RF_OR_INTEN (1 << 24)
+#define CSICR1_STATFF_LEVEL (3 << 22)
+#define CSICR1_STATFF_INTEN (1 << 21)
+#define CSICR1_RXFF_LEVEL(l) (((l) & 3) << 19) /* MX27 */
+#define CSICR1_FB2_DMA_INTEN (1 << 20) /* MX25 */
+#define CSICR1_FB1_DMA_INTEN (1 << 19) /* MX25 */
+#define CSICR1_RXFF_INTEN (1 << 18)
+#define CSICR1_SOF_POL (1 << 17)
+#define CSICR1_SOF_INTEN (1 << 16)
+#define CSICR1_MCLKDIV(d) (((d) & 0xF) << 12)
+#define CSICR1_HSYNC_POL (1 << 11)
+#define CSICR1_CCIR_EN (1 << 10)
+#define CSICR1_MCLKEN (1 << 9)
+#define CSICR1_FCC (1 << 8)
+#define CSICR1_PACK_DIR (1 << 7)
+#define CSICR1_CLR_STATFIFO (1 << 6)
+#define CSICR1_CLR_RXFIFO (1 << 5)
+#define CSICR1_GCLK_MODE (1 << 4)
+#define CSICR1_INV_DATA (1 << 3)
+#define CSICR1_INV_PCLK (1 << 2)
+#define CSICR1_REDGE (1 << 1)
+#define CSICR1_FMT_MASK (CSICR1_PACK_DIR | CSICR1_SWAP16_EN)
+
+#define SHIFT_STATFF_LEVEL 22
+#define SHIFT_RXFF_LEVEL 19
+#define SHIFT_MCLKDIV 12
+
+/* control reg 3 */
+#define CSICR3_FRMCNT (0xFFFF << 16)
+#define CSICR3_FRMCNT_RST (1 << 15)
+#define CSICR3_DMA_REFLASH_RFF (1 << 14)
+#define CSICR3_DMA_REFLASH_SFF (1 << 13)
+#define CSICR3_DMA_REQ_EN_RFF (1 << 12)
+#define CSICR3_DMA_REQ_EN_SFF (1 << 11)
+#define CSICR3_RXFF_LEVEL(l) (((l) & 7) << 4) /* MX25 */
+#define CSICR3_CSI_SUP (1 << 3)
+#define CSICR3_ZERO_PACK_EN (1 << 2)
+#define CSICR3_ECC_INT_EN (1 << 1)
+#define CSICR3_ECC_AUTO_EN (1 << 0)
+
+#define SHIFT_FRMCNT 16
+
+/* csi status reg */
+#define CSISR_SFF_OR_INT (1 << 25)
+#define CSISR_RFF_OR_INT (1 << 24)
+#define CSISR_STATFF_INT (1 << 21)
+#define CSISR_DMA_TSF_FB2_INT (1 << 20) /* MX25 */
+#define CSISR_DMA_TSF_FB1_INT (1 << 19) /* MX25 */
+#define CSISR_RXFF_INT (1 << 18)
+#define CSISR_EOF_INT (1 << 17)
+#define CSISR_SOF_INT (1 << 16)
+#define CSISR_F2_INT (1 << 15)
+#define CSISR_F1_INT (1 << 14)
+#define CSISR_COF_INT (1 << 13)
+#define CSISR_ECC_INT (1 << 1)
+#define CSISR_DRDY (1 << 0)
+
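+/* Register offsets; note that CSISR and CSICR3 are swapped between i.MX27 and i.MX25 */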
+#define CSICR1 0x00
+#define CSICR2 0x04
+#define CSISR (cpu_is_mx27() ? 0x08 : 0x18)
+#define CSISTATFIFO 0x0c
+#define CSIRFIFO 0x10
+#define CSIRXCNT 0x14
+#define CSICR3 (cpu_is_mx27() ? 0x1C : 0x08)
+#define CSIDMASA_STATFIFO 0x20
+#define CSIDMATA_STATFIFO 0x24
+#define CSIDMASA_FB1 0x28
+#define CSIDMASA_FB2 0x2c
+#define CSIFBUF_PARA 0x30
+#define CSIIMAG_PARA 0x34
+
+/* EMMA PrP */
+#define PRP_CNTL 0x00
+#define PRP_INTR_CNTL 0x04
+#define PRP_INTRSTATUS 0x08
+#define PRP_SOURCE_Y_PTR 0x0c
+#define PRP_SOURCE_CB_PTR 0x10
+#define PRP_SOURCE_CR_PTR 0x14
+#define PRP_DEST_RGB1_PTR 0x18
+#define PRP_DEST_RGB2_PTR 0x1c
+#define PRP_DEST_Y_PTR 0x20
+#define PRP_DEST_CB_PTR 0x24
+#define PRP_DEST_CR_PTR 0x28
+#define PRP_SRC_FRAME_SIZE 0x2c
+#define PRP_DEST_CH1_LINE_STRIDE 0x30
+#define PRP_SRC_PIXEL_FORMAT_CNTL 0x34
+#define PRP_CH1_PIXEL_FORMAT_CNTL 0x38
+#define PRP_CH1_OUT_IMAGE_SIZE 0x3c
+#define PRP_CH2_OUT_IMAGE_SIZE 0x40
+#define PRP_SRC_LINE_STRIDE 0x44
+#define PRP_CSC_COEF_012 0x48
+#define PRP_CSC_COEF_345 0x4c
+#define PRP_CSC_COEF_678 0x50
+#define PRP_CH1_RZ_HORI_COEF1 0x54
+#define PRP_CH1_RZ_HORI_COEF2 0x58
+#define PRP_CH1_RZ_HORI_VALID 0x5c
+#define PRP_CH1_RZ_VERT_COEF1 0x60
+#define PRP_CH1_RZ_VERT_COEF2 0x64
+#define PRP_CH1_RZ_VERT_VALID 0x68
+#define PRP_CH2_RZ_HORI_COEF1 0x6c
+#define PRP_CH2_RZ_HORI_COEF2 0x70
+#define PRP_CH2_RZ_HORI_VALID 0x74
+#define PRP_CH2_RZ_VERT_COEF1 0x78
+#define PRP_CH2_RZ_VERT_COEF2 0x7c
+#define PRP_CH2_RZ_VERT_VALID 0x80
+
+#define PRP_CNTL_CH1EN (1 << 0)
+#define PRP_CNTL_CH2EN (1 << 1)
+#define PRP_CNTL_CSIEN (1 << 2)
+#define PRP_CNTL_DATA_IN_YUV420 (0 << 3)
+#define PRP_CNTL_DATA_IN_YUV422 (1 << 3)
+#define PRP_CNTL_DATA_IN_RGB16 (2 << 3)
+#define PRP_CNTL_DATA_IN_RGB32 (3 << 3)
+#define PRP_CNTL_CH1_OUT_RGB8 (0 << 5)
+#define PRP_CNTL_CH1_OUT_RGB16 (1 << 5)
+#define PRP_CNTL_CH1_OUT_RGB32 (2 << 5)
+#define PRP_CNTL_CH1_OUT_YUV422 (3 << 5)
+#define PRP_CNTL_CH2_OUT_YUV420 (0 << 7)
+#define PRP_CNTL_CH2_OUT_YUV422 (1 << 7)
+#define PRP_CNTL_CH2_OUT_YUV444 (2 << 7)
+#define PRP_CNTL_CH1_LEN (1 << 9)
+#define PRP_CNTL_CH2_LEN (1 << 10)
+#define PRP_CNTL_SKIP_FRAME (1 << 11)
+#define PRP_CNTL_SWRST (1 << 12)
+#define PRP_CNTL_CLKEN (1 << 13)
+#define PRP_CNTL_WEN (1 << 14)
+#define PRP_CNTL_CH1BYP (1 << 15)
+#define PRP_CNTL_IN_TSKIP(x) ((x) << 16)
+#define PRP_CNTL_CH1_TSKIP(x) ((x) << 19)
+#define PRP_CNTL_CH2_TSKIP(x) ((x) << 22)
+#define PRP_CNTL_INPUT_FIFO_LEVEL(x) ((x) << 25)
+#define PRP_CNTL_RZ_FIFO_LEVEL(x) ((x) << 27)
+#define PRP_CNTL_CH2B1EN (1 << 29)
+#define PRP_CNTL_CH2B2EN (1 << 30)
+#define PRP_CNTL_CH2FEN (1 << 31)
+
+/* IRQ Enable and status register */
+#define PRP_INTR_RDERR (1 << 0)
+#define PRP_INTR_CH1WERR (1 << 1)
+#define PRP_INTR_CH2WERR (1 << 2)
+#define PRP_INTR_CH1FC (1 << 3)
+#define PRP_INTR_CH2FC (1 << 5)
+#define PRP_INTR_LBOVF (1 << 7)
+#define PRP_INTR_CH2OVF (1 << 8)
+
+/* Resizing registers */
+#define PRP_RZ_VALID_TBL_LEN(x) ((x) << 24)
+#define PRP_RZ_VALID_BILINEAR (1 << 31)
+
+#define MAX_VIDEO_MEM 16
+
+#define RESIZE_NUM_MIN 1
+#define RESIZE_NUM_MAX 20
+#define BC_COEF 3
+#define SZ_COEF (1 << BC_COEF)
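+
+/*
+ * Resize coefficient table entries: the low BC_COEF bits hold the tap
+ * weight, bit BC_COEF (i.e. SZ_COEF) flags that an output pixel is
+ * produced at this tap.
+ */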
+
+#define RESIZE_DIR_H 0
+#define RESIZE_DIR_V 1
+
+#define RESIZE_ALGO_BILINEAR 0
+#define RESIZE_ALGO_AVERAGING 1
+
+struct mx2_prp_cfg {
+ int channel;
+ u32 in_fmt;
+ u32 out_fmt;
+ u32 src_pixel;
+ u32 ch1_pixel;
+ u32 irq_flags;
+ u32 csicr1;
+};
+
+/* prp resizing parameters */
+struct emma_prp_resize {
+ int algo; /* type of algorithm used */
+ int len; /* number of coefficients */
+ unsigned char s[RESIZE_NUM_MAX]; /* table of coefficients */
+};
+
+/* prp configuration for a client-host fmt pair */
+struct mx2_fmt_cfg {
+ enum v4l2_mbus_pixelcode in_fmt;
+ u32 out_fmt;
+ struct mx2_prp_cfg cfg;
+};
+
+enum mx2_buffer_state {
+ MX2_STATE_QUEUED,
+ MX2_STATE_ACTIVE,
+ MX2_STATE_DONE,
+};
+
+struct mx2_buf_internal {
+ struct list_head queue;
+ int bufnum;
+ bool discard;
+};
+
+/* buffer for one video frame */
+struct mx2_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_buffer vb;
+ enum mx2_buffer_state state;
+ struct mx2_buf_internal internal;
+};
+
+struct mx2_camera_dev {
+ struct device *dev;
+ struct soc_camera_host soc_host;
+ struct soc_camera_device *icd;
+ struct clk *clk_csi, *clk_emma_ahb, *clk_emma_ipg;
+
+ void __iomem *base_csi, *base_emma;
+
+ struct mx2_camera_platform_data *pdata;
+ unsigned long platform_flags;
+
+ struct list_head capture;
+ struct list_head active_bufs;
+ struct list_head discard;
+
+ spinlock_t lock;
+
+ int dma;
+ struct mx2_buffer *active;
+ struct mx2_buffer *fb1_active;
+ struct mx2_buffer *fb2_active;
+
+ u32 csicr1;
+
+ struct mx2_buf_internal buf_discard[2];
+ void *discard_buffer;
+ dma_addr_t discard_buffer_dma;
+ size_t discard_size;
+ struct mx2_fmt_cfg *emma_prp;
+ struct emma_prp_resize resizing[2];
+ unsigned int s_width, s_height;
+ u32 frame_count;
+ struct vb2_alloc_ctx *alloc_ctx;
+};
+
+static struct mx2_buffer *mx2_ibuf_to_buf(struct mx2_buf_internal *int_buf)
+{
+ return container_of(int_buf, struct mx2_buffer, internal);
+}
+
+static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
+ /*
+ * This is a generic configuration which is valid for most
+ * prp input-output format combinations.
+ * We set the incoming and outgoing pixel format to a
+ * 16 bit wide format and adjust the bytesperline
+ * accordingly. With this configuration the input data
+ * will not be changed by the emma and could be any
+ * 16 bit wide pixel format.
+ */
+ {
+ .in_fmt = 0,
+ .out_fmt = 0,
+ .cfg = {
+ .channel = 1,
+ .in_fmt = PRP_CNTL_DATA_IN_RGB16,
+ .out_fmt = PRP_CNTL_CH1_OUT_RGB16,
+ .src_pixel = 0x2ca00565, /* RGB565 */
+ .ch1_pixel = 0x2ca00565, /* RGB565 */
+ .irq_flags = PRP_INTR_RDERR | PRP_INTR_CH1WERR |
+ PRP_INTR_CH1FC | PRP_INTR_LBOVF,
+ .csicr1 = 0,
+ }
+ },
+ {
+ .in_fmt = V4L2_MBUS_FMT_UYVY8_2X8,
+ .out_fmt = V4L2_PIX_FMT_YUYV,
+ .cfg = {
+ .channel = 1,
+ .in_fmt = PRP_CNTL_DATA_IN_YUV422,
+ .out_fmt = PRP_CNTL_CH1_OUT_YUV422,
+ .src_pixel = 0x22000888, /* YUV422 (YUYV) */
+ .ch1_pixel = 0x62000888, /* YUV422 (YUYV) */
+ .irq_flags = PRP_INTR_RDERR | PRP_INTR_CH1WERR |
+ PRP_INTR_CH1FC | PRP_INTR_LBOVF,
+ .csicr1 = CSICR1_SWAP16_EN,
+ }
+ },
+ {
+ .in_fmt = V4L2_MBUS_FMT_YUYV8_2X8,
+ .out_fmt = V4L2_PIX_FMT_YUYV,
+ .cfg = {
+ .channel = 1,
+ .in_fmt = PRP_CNTL_DATA_IN_YUV422,
+ .out_fmt = PRP_CNTL_CH1_OUT_YUV422,
+ .src_pixel = 0x22000888, /* YUV422 (YUYV) */
+ .ch1_pixel = 0x62000888, /* YUV422 (YUYV) */
+ .irq_flags = PRP_INTR_RDERR | PRP_INTR_CH1WERR |
+ PRP_INTR_CH1FC | PRP_INTR_LBOVF,
+ .csicr1 = CSICR1_PACK_DIR,
+ }
+ },
+ {
+ .in_fmt = V4L2_MBUS_FMT_YUYV8_2X8,
+ .out_fmt = V4L2_PIX_FMT_YUV420,
+ .cfg = {
+ .channel = 2,
+ .in_fmt = PRP_CNTL_DATA_IN_YUV422,
+ .out_fmt = PRP_CNTL_CH2_OUT_YUV420,
+ .src_pixel = 0x22000888, /* YUV422 (YUYV) */
+ .irq_flags = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
+ PRP_INTR_CH2FC | PRP_INTR_LBOVF |
+ PRP_INTR_CH2OVF,
+ .csicr1 = CSICR1_PACK_DIR,
+ }
+ },
+ {
+ .in_fmt = V4L2_MBUS_FMT_UYVY8_2X8,
+ .out_fmt = V4L2_PIX_FMT_YUV420,
+ .cfg = {
+ .channel = 2,
+ .in_fmt = PRP_CNTL_DATA_IN_YUV422,
+ .out_fmt = PRP_CNTL_CH2_OUT_YUV420,
+ .src_pixel = 0x22000888, /* YUV422 (YUYV) */
+ .irq_flags = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
+ PRP_INTR_CH2FC | PRP_INTR_LBOVF |
+ PRP_INTR_CH2OVF,
+ .csicr1 = CSICR1_SWAP16_EN,
+ }
+ },
+};
+
+static struct mx2_fmt_cfg *mx27_emma_prp_get_format(
+ enum v4l2_mbus_pixelcode in_fmt,
+ u32 out_fmt)
+{
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(mx27_emma_prp_table); i++)
+ if ((mx27_emma_prp_table[i].in_fmt == in_fmt) &&
+ (mx27_emma_prp_table[i].out_fmt == out_fmt)) {
+ return &mx27_emma_prp_table[i];
+ }
+ /* If no match return the most generic configuration */
+ return &mx27_emma_prp_table[0];
+}
+
+static void mx27_update_emma_buf(struct mx2_camera_dev *pcdev,
+ unsigned long phys, int bufnum)
+{
+ struct mx2_fmt_cfg *prp = pcdev->emma_prp;
+
+ if (prp->cfg.channel == 1) {
+ writel(phys, pcdev->base_emma +
+ PRP_DEST_RGB1_PTR + 4 * bufnum);
+ } else {
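+ /*
+ * Channel 2: bufnum 0 uses the PRP_DEST_Y/CB/CR_PTR registers,
+ * bufnum 1 lands 0x14 lower, i.e. in the PRP_SOURCE_* register
+ * slots (unused here, since the PrP input comes from the CSI).
+ */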
+ writel(phys, pcdev->base_emma +
+ PRP_DEST_Y_PTR - 0x14 * bufnum);
+ if (prp->out_fmt == V4L2_PIX_FMT_YUV420) {
+ u32 imgsize = pcdev->icd->user_height *
+ pcdev->icd->user_width;
+
+ writel(phys + imgsize, pcdev->base_emma +
+ PRP_DEST_CB_PTR - 0x14 * bufnum);
+ writel(phys + ((5 * imgsize) / 4), pcdev->base_emma +
+ PRP_DEST_CR_PTR - 0x14 * bufnum);
+ }
+ }
+}
+
+static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev)
+{
+ unsigned long flags;
+
+ clk_disable_unprepare(pcdev->clk_csi);
+ writel(0, pcdev->base_csi + CSICR1);
+ if (cpu_is_mx27()) {
+ writel(0, pcdev->base_emma + PRP_CNTL);
+ } else if (cpu_is_mx25()) {
+ spin_lock_irqsave(&pcdev->lock, flags);
+ pcdev->fb1_active = NULL;
+ pcdev->fb2_active = NULL;
+ writel(0, pcdev->base_csi + CSIDMASA_FB1);
+ writel(0, pcdev->base_csi + CSIDMASA_FB2);
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+ }
+}
+
+/*
+ * The following two functions absolutely depend on the fact that
+ * there can be only one camera on the mx2 camera sensor interface
+ */
+static int mx2_camera_add_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx2_camera_dev *pcdev = ici->priv;
+ int ret;
+ u32 csicr1;
+
+ if (pcdev->icd)
+ return -EBUSY;
+
+ ret = clk_prepare_enable(pcdev->clk_csi);
+ if (ret < 0)
+ return ret;
+
+ csicr1 = CSICR1_MCLKEN;
+
+ if (cpu_is_mx27())
+ csicr1 |= CSICR1_PRP_IF_EN | CSICR1_FCC |
+ CSICR1_RXFF_LEVEL(0);
+
+ pcdev->csicr1 = csicr1;
+ writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
+
+ pcdev->icd = icd;
+ pcdev->frame_count = 0;
+
+ dev_info(icd->parent, "Camera driver attached to camera %d\n",
+ icd->devnum);
+
+ return 0;
+}
+
+static void mx2_camera_remove_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx2_camera_dev *pcdev = ici->priv;
+
+ BUG_ON(icd != pcdev->icd);
+
+ dev_info(icd->parent, "Camera driver detached from camera %d\n",
+ icd->devnum);
+
+ mx2_camera_deactivate(pcdev);
+
+ pcdev->icd = NULL;
+}
+
+static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb,
+ int state)
+{
+ struct vb2_buffer *vb;
+ struct mx2_buffer *buf;
+ struct mx2_buffer **fb_active = fb == 1 ? &pcdev->fb1_active :
+ &pcdev->fb2_active;
+ u32 fb_reg = fb == 1 ? CSIDMASA_FB1 : CSIDMASA_FB2;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+
+ if (*fb_active == NULL)
+ goto out;
+
+ vb = &(*fb_active)->vb;
+ dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+ vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
+
+ do_gettimeofday(&vb->v4l2_buf.timestamp);
+ vb->v4l2_buf.sequence++;
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+
+ if (list_empty(&pcdev->capture)) {
+ buf = NULL;
+ writel(0, pcdev->base_csi + fb_reg);
+ } else {
+ buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
+ internal.queue);
+ vb = &buf->vb;
+ list_del(&buf->internal.queue);
+ buf->state = MX2_STATE_ACTIVE;
+ writel(vb2_dma_contig_plane_dma_addr(vb, 0),
+ pcdev->base_csi + fb_reg);
+ }
+
+ *fb_active = buf;
+
+out:
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static irqreturn_t mx25_camera_irq(int irq_csi, void *data)
+{
+ struct mx2_camera_dev *pcdev = data;
+ u32 status = readl(pcdev->base_csi + CSISR);
+
+ if (status & CSISR_DMA_TSF_FB1_INT)
+ mx25_camera_frame_done(pcdev, 1, MX2_STATE_DONE);
+ else if (status & CSISR_DMA_TSF_FB2_INT)
+ mx25_camera_frame_done(pcdev, 2, MX2_STATE_DONE);
+
+ /* FIXME: handle CSISR_RFF_OR_INT */
+
+ writel(status, pcdev->base_csi + CSISR);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Videobuf operations
+ */
+static int mx2_videobuf_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *count, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx2_camera_dev *pcdev = ici->priv;
+
+ dev_dbg(icd->parent, "count=%d, size=%d\n", *count, sizes[0]);
+
+ /* TODO: support for VIDIOC_CREATE_BUFS not ready */
+ if (fmt != NULL)
+ return -ENOTTY;
+
+ alloc_ctxs[0] = pcdev->alloc_ctx;
+
+ sizes[0] = icd->sizeimage;
+
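+ /* default to 32 buffers, capped so the total stays below MAX_VIDEO_MEM MiB */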
+ if (0 == *count)
+ *count = 32;
+ if (!*num_planes &&
+ sizes[0] * *count > MAX_VIDEO_MEM * 1024 * 1024)
+ *count = (MAX_VIDEO_MEM * 1024 * 1024) / sizes[0];
+
+ *num_planes = 1;
+
+ return 0;
+}
+
+static int mx2_videobuf_prepare(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ int ret = 0;
+
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+ vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
+
+#ifdef DEBUG
+ /*
+ * This can be useful if you want to see if we actually fill
+ * the buffer with something
+ */
+ memset((void *)vb2_plane_vaddr(vb, 0),
+ 0xaa, vb2_get_plane_payload(vb, 0));
+#endif
+
+ vb2_set_plane_payload(vb, 0, icd->sizeimage);
+ if (vb2_plane_vaddr(vb, 0) &&
+ vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static void mx2_videobuf_queue(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici =
+ to_soc_camera_host(icd->parent);
+ struct mx2_camera_dev *pcdev = ici->priv;
+ struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
+ unsigned long flags;
+
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+ vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+
+ buf->state = MX2_STATE_QUEUED;
+ list_add_tail(&buf->internal.queue, &pcdev->capture);
+
+ if (cpu_is_mx25()) {
+ u32 csicr3, dma_inten = 0;
+
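+ /*
+ * The i.MX25 CSI writes into two alternating frame buffers
+ * (FB1/FB2). Claim whichever slot is idle for this buffer and
+ * enable the corresponding DMA-done interrupt;
+ * mx25_camera_frame_done() keeps the slots fed from then on.
+ */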
+ if (pcdev->fb1_active == NULL) {
+ writel(vb2_dma_contig_plane_dma_addr(vb, 0),
+ pcdev->base_csi + CSIDMASA_FB1);
+ pcdev->fb1_active = buf;
+ dma_inten = CSICR1_FB1_DMA_INTEN;
+ } else if (pcdev->fb2_active == NULL) {
+ writel(vb2_dma_contig_plane_dma_addr(vb, 0),
+ pcdev->base_csi + CSIDMASA_FB2);
+ pcdev->fb2_active = buf;
+ dma_inten = CSICR1_FB2_DMA_INTEN;
+ }
+
+ if (dma_inten) {
+ list_del(&buf->internal.queue);
+ buf->state = MX2_STATE_ACTIVE;
+
+ csicr3 = readl(pcdev->base_csi + CSICR3);
+
+ /* Reflash DMA */
+ writel(csicr3 | CSICR3_DMA_REFLASH_RFF,
+ pcdev->base_csi + CSICR3);
+
+ /* clear & enable interrupts */
+ writel(dma_inten, pcdev->base_csi + CSISR);
+ pcdev->csicr1 |= dma_inten;
+ writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
+
+ /* enable DMA */
+ csicr3 |= CSICR3_DMA_REQ_EN_RFF | CSICR3_RXFF_LEVEL(1);
+ writel(csicr3, pcdev->base_csi + CSICR3);
+ }
+ }
+
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static void mx2_videobuf_release(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx2_camera_dev *pcdev = ici->priv;
+ struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
+ unsigned long flags;
+
+#ifdef DEBUG
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+ vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
+
+ switch (buf->state) {
+ case MX2_STATE_ACTIVE:
+ dev_info(icd->parent, "%s (active)\n", __func__);
+ break;
+ case MX2_STATE_QUEUED:
+ dev_info(icd->parent, "%s (queued)\n", __func__);
+ break;
+ default:
+ dev_info(icd->parent, "%s (unknown) %d\n", __func__,
+ buf->state);
+ break;
+ }
+#endif
+
+ /*
+ * Terminate only queued but inactive buffers. Active buffers are
+ * released when they become inactive after videobuf_waiton().
+ *
+ * FIXME: implement forced termination of active buffers for mx27 and
+ * mx27 eMMA, so that the user won't get stuck in an uninterruptible
+ * state. This requires specific handling for each of these DMA
+ * types.
+ */
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+ if (cpu_is_mx25() && buf->state == MX2_STATE_ACTIVE) {
+ if (pcdev->fb1_active == buf) {
+ pcdev->csicr1 &= ~CSICR1_FB1_DMA_INTEN;
+ writel(0, pcdev->base_csi + CSIDMASA_FB1);
+ pcdev->fb1_active = NULL;
+ } else if (pcdev->fb2_active == buf) {
+ pcdev->csicr1 &= ~CSICR1_FB2_DMA_INTEN;
+ writel(0, pcdev->base_csi + CSIDMASA_FB2);
+ pcdev->fb2_active = NULL;
+ }
+ writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
+ }
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static void mx27_camera_emma_buf_init(struct soc_camera_device *icd,
+ int bytesperline)
+{
+ struct soc_camera_host *ici =
+ to_soc_camera_host(icd->parent);
+ struct mx2_camera_dev *pcdev = ici->priv;
+ struct mx2_fmt_cfg *prp = pcdev->emma_prp;
+
+ writel((pcdev->s_width << 16) | pcdev->s_height,
+ pcdev->base_emma + PRP_SRC_FRAME_SIZE);
+ writel(prp->cfg.src_pixel,
+ pcdev->base_emma + PRP_SRC_PIXEL_FORMAT_CNTL);
+ if (prp->cfg.channel == 1) {
+ writel((icd->user_width << 16) | icd->user_height,
+ pcdev->base_emma + PRP_CH1_OUT_IMAGE_SIZE);
+ writel(bytesperline,
+ pcdev->base_emma + PRP_DEST_CH1_LINE_STRIDE);
+ writel(prp->cfg.ch1_pixel,
+ pcdev->base_emma + PRP_CH1_PIXEL_FORMAT_CNTL);
+ } else { /* channel 2 */
+ writel((icd->user_width << 16) | icd->user_height,
+ pcdev->base_emma + PRP_CH2_OUT_IMAGE_SIZE);
+ }
+
+ /* Enable interrupts */
+ writel(prp->cfg.irq_flags, pcdev->base_emma + PRP_INTR_CNTL);
+}
+
+static void mx2_prp_resize_commit(struct mx2_camera_dev *pcdev)
+{
+ int dir;
+
+ for (dir = RESIZE_DIR_H; dir <= RESIZE_DIR_V; dir++) {
+ unsigned char *s = pcdev->resizing[dir].s;
+ int len = pcdev->resizing[dir].len;
+ unsigned int coeff[2] = {0, 0};
+ unsigned int valid = 0;
+ int i;
+
+ if (len == 0)
+ continue;
+
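+ /*
+ * Pack the 3 bit tap weights into the two COEF registers (ten
+ * taps each; the extra shift at taps 5 and 15 accounts for a gap
+ * in the register layout) and gather the per-tap output flags
+ * into the VALID register together with the table length.
+ */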
+ for (i = RESIZE_NUM_MAX - 1; i >= 0; i--) {
+ int j;
+
+ j = i > 9 ? 1 : 0;
+ coeff[j] = (coeff[j] << BC_COEF) |
+ (s[i] & (SZ_COEF - 1));
+
+ if (i == 5 || i == 15)
+ coeff[j] <<= 1;
+
+ valid = (valid << 1) | (s[i] >> BC_COEF);
+ }
+
+ valid |= PRP_RZ_VALID_TBL_LEN(len);
+
+ if (pcdev->resizing[dir].algo == RESIZE_ALGO_BILINEAR)
+ valid |= PRP_RZ_VALID_BILINEAR;
+
+ if (pcdev->emma_prp->cfg.channel == 1) {
+ if (dir == RESIZE_DIR_H) {
+ writel(coeff[0], pcdev->base_emma +
+ PRP_CH1_RZ_HORI_COEF1);
+ writel(coeff[1], pcdev->base_emma +
+ PRP_CH1_RZ_HORI_COEF2);
+ writel(valid, pcdev->base_emma +
+ PRP_CH1_RZ_HORI_VALID);
+ } else {
+ writel(coeff[0], pcdev->base_emma +
+ PRP_CH1_RZ_VERT_COEF1);
+ writel(coeff[1], pcdev->base_emma +
+ PRP_CH1_RZ_VERT_COEF2);
+ writel(valid, pcdev->base_emma +
+ PRP_CH1_RZ_VERT_VALID);
+ }
+ } else {
+ if (dir == RESIZE_DIR_H) {
+ writel(coeff[0], pcdev->base_emma +
+ PRP_CH2_RZ_HORI_COEF1);
+ writel(coeff[1], pcdev->base_emma +
+ PRP_CH2_RZ_HORI_COEF2);
+ writel(valid, pcdev->base_emma +
+ PRP_CH2_RZ_HORI_VALID);
+ } else {
+ writel(coeff[0], pcdev->base_emma +
+ PRP_CH2_RZ_VERT_COEF1);
+ writel(coeff[1], pcdev->base_emma +
+ PRP_CH2_RZ_VERT_COEF2);
+ writel(valid, pcdev->base_emma +
+ PRP_CH2_RZ_VERT_VALID);
+ }
+ }
+ }
+}
+
+static int mx2_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(q);
+ struct soc_camera_host *ici =
+ to_soc_camera_host(icd->parent);
+ struct mx2_camera_dev *pcdev = ici->priv;
+ struct mx2_fmt_cfg *prp = pcdev->emma_prp;
+ struct vb2_buffer *vb;
+ struct mx2_buffer *buf;
+ unsigned long phys;
+ int bytesperline;
+
+ if (cpu_is_mx27()) {
+ unsigned long flags;
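+ /* the eMMA PrP ping-pongs between two output buffers, so at least two must be queued */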
+ if (count < 2)
+ return -EINVAL;
+
+ bytesperline = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+ if (bytesperline < 0)
+ return bytesperline;
+
+ /*
+ * I didn't manage to properly enable/disable the prp
+ * on a per frame basis during running transfers,
+ * thus we allocate a buffer here and use it to
+ * discard frames when no buffer is available.
+ * Feel free to work on this ;)
+ *
+ * Allocate before taking the spinlock: dma_alloc_coherent()
+ * with GFP_KERNEL may sleep.
+ */
+ pcdev->discard_size = icd->user_height * bytesperline;
+ pcdev->discard_buffer = dma_alloc_coherent(ici->v4l2_dev.dev,
+ pcdev->discard_size, &pcdev->discard_buffer_dma,
+ GFP_KERNEL);
+ if (!pcdev->discard_buffer)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+
+ buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
+ internal.queue);
+ buf->internal.bufnum = 0;
+ vb = &buf->vb;
+ buf->state = MX2_STATE_ACTIVE;
+
+ phys = vb2_dma_contig_plane_dma_addr(vb, 0);
+ mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
+ list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
+
+ buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
+ internal.queue);
+ buf->internal.bufnum = 1;
+ vb = &buf->vb;
+ buf->state = MX2_STATE_ACTIVE;
+
+ phys = vb2_dma_contig_plane_dma_addr(vb, 0);
+ mx27_update_emma_buf(pcdev, phys, buf->internal.bufnum);
+ list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
+
+ pcdev->buf_discard[0].discard = true;
+ list_add_tail(&pcdev->buf_discard[0].queue,
+ &pcdev->discard);
+
+ pcdev->buf_discard[1].discard = true;
+ list_add_tail(&pcdev->buf_discard[1].queue,
+ &pcdev->discard);
+
+ mx2_prp_resize_commit(pcdev);
+
+ mx27_camera_emma_buf_init(icd, bytesperline);
+
+ if (prp->cfg.channel == 1) {
+ writel(PRP_CNTL_CH1EN |
+ PRP_CNTL_CSIEN |
+ prp->cfg.in_fmt |
+ prp->cfg.out_fmt |
+ PRP_CNTL_CH1_LEN |
+ PRP_CNTL_CH1BYP |
+ PRP_CNTL_CH1_TSKIP(0) |
+ PRP_CNTL_IN_TSKIP(0),
+ pcdev->base_emma + PRP_CNTL);
+ } else {
+ writel(PRP_CNTL_CH2EN |
+ PRP_CNTL_CSIEN |
+ prp->cfg.in_fmt |
+ prp->cfg.out_fmt |
+ PRP_CNTL_CH2_LEN |
+ PRP_CNTL_CH2_TSKIP(0) |
+ PRP_CNTL_IN_TSKIP(0),
+ pcdev->base_emma + PRP_CNTL);
+ }
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+ }
+
+ return 0;
+}
+
+static int mx2_stop_streaming(struct vb2_queue *q)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(q);
+ struct soc_camera_host *ici =
+ to_soc_camera_host(icd->parent);
+ struct mx2_camera_dev *pcdev = ici->priv;
+ struct mx2_fmt_cfg *prp = pcdev->emma_prp;
+ unsigned long flags;
+ void *b;
+ u32 cntl;
+
+ if (cpu_is_mx27()) {
+ spin_lock_irqsave(&pcdev->lock, flags);
+
+ cntl = readl(pcdev->base_emma + PRP_CNTL);
+ if (prp->cfg.channel == 1) {
+ writel(cntl & ~PRP_CNTL_CH1EN,
+ pcdev->base_emma + PRP_CNTL);
+ } else {
+ writel(cntl & ~PRP_CNTL_CH2EN,
+ pcdev->base_emma + PRP_CNTL);
+ }
+ INIT_LIST_HEAD(&pcdev->capture);
+ INIT_LIST_HEAD(&pcdev->active_bufs);
+ INIT_LIST_HEAD(&pcdev->discard);
+
+ b = pcdev->discard_buffer;
+ pcdev->discard_buffer = NULL;
+
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+
+ dma_free_coherent(ici->v4l2_dev.dev,
+ pcdev->discard_size, b, pcdev->discard_buffer_dma);
+ }
+
+ return 0;
+}
+
+static struct vb2_ops mx2_videobuf_ops = {
+ .queue_setup = mx2_videobuf_setup,
+ .buf_prepare = mx2_videobuf_prepare,
+ .buf_queue = mx2_videobuf_queue,
+ .buf_cleanup = mx2_videobuf_release,
+ .start_streaming = mx2_start_streaming,
+ .stop_streaming = mx2_stop_streaming,
+};
+
+static int mx2_camera_init_videobuf(struct vb2_queue *q,
+ struct soc_camera_device *icd)
+{
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = icd;
+ q->ops = &mx2_videobuf_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct mx2_buffer);
+
+ return vb2_queue_init(q);
+}
+
+#define MX2_BUS_FLAGS (V4L2_MBUS_MASTER | \
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_HSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_PCLK_SAMPLE_RISING | \
+ V4L2_MBUS_PCLK_SAMPLE_FALLING | \
+ V4L2_MBUS_DATA_ACTIVE_HIGH | \
+ V4L2_MBUS_DATA_ACTIVE_LOW)
+
+static int mx27_camera_emma_prp_reset(struct mx2_camera_dev *pcdev)
+{
+ u32 cntl;
+ int count = 0;
+
+ cntl = readl(pcdev->base_emma + PRP_CNTL);
+ writel(PRP_CNTL_SWRST, pcdev->base_emma + PRP_CNTL);
+ while (count++ < 100) {
+ if (!(readl(pcdev->base_emma + PRP_CNTL) & PRP_CNTL_SWRST))
+ return 0;
+ barrier();
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx2_camera_dev *pcdev = ici->priv;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long common_flags;
+ int ret;
+ int bytesperline;
+ u32 csicr1 = pcdev->csicr1;
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg, MX2_BUS_FLAGS);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%x\n",
+ cfg.flags, MX2_BUS_FLAGS);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = MX2_BUS_FLAGS;
+ }
+
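+ /* if the sensor supports both polarities, let the platform flags decide */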
+ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+ if (pcdev->platform_flags & MX2_CAMERA_HSYNC_HIGH)
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+ else
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+ }
+
+ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+ (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+ if (pcdev->platform_flags & MX2_CAMERA_PCLK_SAMPLE_RISING)
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ else
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+ }
+
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
+ common_flags, ret);
+ return ret;
+ }
+
+ csicr1 = (csicr1 & ~CSICR1_FMT_MASK) | pcdev->emma_prp->cfg.csicr1;
+
+ if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ csicr1 |= CSICR1_REDGE;
+ if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ csicr1 |= CSICR1_SOF_POL;
+ if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ csicr1 |= CSICR1_HSYNC_POL;
+ if (pcdev->platform_flags & MX2_CAMERA_EXT_VSYNC)
+ csicr1 |= CSICR1_EXT_VSYNC;
+ if (pcdev->platform_flags & MX2_CAMERA_CCIR)
+ csicr1 |= CSICR1_CCIR_EN;
+ if (pcdev->platform_flags & MX2_CAMERA_CCIR_INTERLACE)
+ csicr1 |= CSICR1_CCIR_MODE;
+ if (pcdev->platform_flags & MX2_CAMERA_GATED_CLOCK)
+ csicr1 |= CSICR1_GCLK_MODE;
+ if (pcdev->platform_flags & MX2_CAMERA_INV_DATA)
+ csicr1 |= CSICR1_INV_DATA;
+
+ pcdev->csicr1 = csicr1;
+
+ bytesperline = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+ if (bytesperline < 0)
+ return bytesperline;
+
+ if (cpu_is_mx27()) {
+ ret = mx27_camera_emma_prp_reset(pcdev);
+ if (ret)
+ return ret;
+ } else if (cpu_is_mx25()) {
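+ /*
+ * i.MX25: program the receive count (frame size in 32-bit
+ * words) and the image size (line length in bytes, height
+ * in lines).
+ */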
+ writel((bytesperline * icd->user_height) >> 2,
+ pcdev->base_csi + CSIRXCNT);
+ writel((bytesperline << 16) | icd->user_height,
+ pcdev->base_csi + CSIIMAG_PARA);
+ }
+
+ writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
+
+ return 0;
+}
+
+static int mx2_camera_set_crop(struct soc_camera_device *icd,
+ struct v4l2_crop *a)
+{
+ struct v4l2_rect *rect = &a->c;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ soc_camera_limit_side(&rect->left, &rect->width, 0, 2, 4096);
+ soc_camera_limit_side(&rect->top, &rect->height, 0, 2, 4096);
+
+ ret = v4l2_subdev_call(sd, video, s_crop, a);
+ if (ret < 0)
+ return ret;
+
+ /* The capture device might have changed its output */
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(icd->parent, "Sensor cropped %dx%d\n",
+ mf.width, mf.height);
+
+ icd->user_width = mf.width;
+ icd->user_height = mf.height;
+
+ return ret;
+}
+
+static int mx2_camera_get_formats(struct soc_camera_device *icd,
+ unsigned int idx,
+ struct soc_camera_format_xlate *xlate)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_mbus_pixelfmt *fmt;
+ struct device *dev = icd->parent;
+ enum v4l2_mbus_pixelcode code;
+ int ret, formats = 0;
+
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* no more formats */
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_err(dev, "Invalid format code #%u: %d\n", idx, code);
+ return 0;
+ }
+
+ if (code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+ code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ formats++;
+ if (xlate) {
+ /*
+ * CH2 can output YUV420 which is a standard format in
+ * soc_mediabus.c
+ */
+ xlate->host_fmt =
+ soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_1_5X8);
+ xlate->code = code;
+ dev_dbg(dev, "Providing host format %s for sensor code %d\n",
+ xlate->host_fmt->name, code);
+ xlate++;
+ }
+ }
+
+ if (code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ formats++;
+ if (xlate) {
+ xlate->host_fmt =
+ soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8);
+ xlate->code = code;
+ dev_dbg(dev, "Providing host format %s for sensor code %d\n",
+ xlate->host_fmt->name, code);
+ xlate++;
+ }
+ }
+
+ /* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
+ }
+ return formats;
+}
+
+static int mx2_emmaprp_resize(struct mx2_camera_dev *pcdev,
+ struct v4l2_mbus_framefmt *mf_in,
+ struct v4l2_pix_format *pix_out, bool apply)
+{
+ int num, den;
+ unsigned long m;
+ int i, dir;
+
+ for (dir = RESIZE_DIR_H; dir <= RESIZE_DIR_V; dir++) {
+ struct emma_prp_resize tmprsz;
+ unsigned char *s = tmprsz.s;
+ int len = 0;
+ int in, out;
+
+ if (dir == RESIZE_DIR_H) {
+ in = mf_in->width;
+ out = pix_out->width;
+ } else {
+ in = mf_in->height;
+ out = pix_out->height;
+ }
+
+ if (in < out)
+ return -EINVAL;
+ else if (in == out)
+ continue;
+
+ /* Calculate ratio */
+ m = gcd(in, out);
+ num = in / m;
+ den = out / m;
+ if (num > RESIZE_NUM_MAX)
+ return -EINVAL;
+
+ if ((num >= 2 * den) && (den == 1) &&
+ (num < 9) && (!(num & 0x01))) {
+ int sum = 0;
+ int j;
+
+ /* Average scaling for >= 2:1 ratios */
+ /* Support can be added for num >=9 and odd values */
+
+ tmprsz.algo = RESIZE_ALGO_AVERAGING;
+ len = num;
+
+ for (i = 0; i < (len / 2); i++)
+ s[i] = 8;
+
+ do {
+ for (i = 0; i < (len / 2); i++) {
+ s[i] = s[i] >> 1;
+ sum = 0;
+ for (j = 0; j < (len / 2); j++)
+ sum += s[j];
+ if (sum == 4)
+ break;
+ }
+ } while (sum != 4);
+
+ for (i = (len / 2); i < len; i++)
+ s[i] = s[len - i - 1];
+
+ s[len - 1] |= SZ_COEF;
+ } else {
+ /* bilinear scaling for < 2:1 ratios */
+ int v; /* overflow counter */
+ int coeff, nxt; /* table output */
+ int in_pos_inc = 2 * den;
+ int out_pos = num;
+ int out_pos_inc = 2 * num;
+ int init_carry = num - den;
+ int carry = init_carry;
+
+ tmprsz.algo = RESIZE_ALGO_BILINEAR;
+ v = den + in_pos_inc;
+ do {
+ coeff = v - out_pos;
+ out_pos += out_pos_inc;
+ carry += out_pos_inc;
+ for (nxt = 0; v < out_pos; nxt++) {
+ v += in_pos_inc;
+ carry -= in_pos_inc;
+ }
+
+ if (len > RESIZE_NUM_MAX)
+ return -EINVAL;
+
+ coeff = ((coeff << BC_COEF) +
+ (in_pos_inc >> 1)) / in_pos_inc;
+
+ if (coeff >= (SZ_COEF - 1))
+ coeff--;
+
+ coeff |= SZ_COEF;
+ s[len] = (unsigned char)coeff;
+ len++;
+
+ for (i = 1; i < nxt; i++) {
+ if (len >= RESIZE_NUM_MAX)
+ return -EINVAL;
+ s[len] = 0;
+ len++;
+ }
+ } while (carry != init_carry);
+ }
+ tmprsz.len = len;
+ if (dir == RESIZE_DIR_H)
+ mf_in->width = pix_out->width;
+ else
+ mf_in->height = pix_out->height;
+
+ if (apply)
+ memcpy(&pcdev->resizing[dir], &tmprsz, sizeof(tmprsz));
+ }
+ return 0;
+}
+
+static int mx2_camera_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx2_camera_dev *pcdev = ici->priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ dev_dbg(icd->parent, "%s: requested params: width = %d, height = %d\n",
+ __func__, pix->width, pix->height);
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(icd->parent, "Format %x not found\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ /* Store width and height returned by the sensor for resizing */
+ pcdev->s_width = mf.width;
+ pcdev->s_height = mf.height;
+ dev_dbg(icd->parent, "%s: sensor params: width = %d, height = %d\n",
+ __func__, pcdev->s_width, pcdev->s_height);
+
+ pcdev->emma_prp = mx27_emma_prp_get_format(xlate->code,
+ xlate->host_fmt->fourcc);
+
+ memset(pcdev->resizing, 0, sizeof(pcdev->resizing));
+ if ((mf.width != pix->width || mf.height != pix->height) &&
+ pcdev->emma_prp->cfg.in_fmt == PRP_CNTL_DATA_IN_YUV422) {
+ if (mx2_emmaprp_resize(pcdev, &mf, pix, true) < 0)
+ dev_dbg(icd->parent, "%s: can't resize\n", __func__);
+ }
+
+ if (mf.code != xlate->code)
+ return -EINVAL;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+ icd->current_fmt = xlate;
+
+ dev_dbg(icd->parent, "%s: returned params: width = %d, height = %d\n",
+ __func__, pix->width, pix->height);
+
+ return 0;
+}
+
+static int mx2_camera_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ __u32 pixfmt = pix->pixelformat;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx2_camera_dev *pcdev = ici->priv;
+ unsigned int width_limit;
+ int ret;
+
+ dev_dbg(icd->parent, "%s: requested params: width = %d, height = %d\n",
+ __func__, pix->width, pix->height);
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (pixfmt && !xlate) {
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
+ return -EINVAL;
+ }
+
+ /* FIXME: implement MX27 limits */
+
+ /* limit to MX25 hardware capabilities */
+ if (cpu_is_mx25()) {
+ if (xlate->host_fmt->bits_per_sample <= 8)
+ width_limit = 0xffff * 4;
+ else
+ width_limit = 0xffff * 2;
+ /* CSIIMAG_PARA limit */
+ if (pix->width > width_limit)
+ pix->width = width_limit;
+ if (pix->height > 0xffff)
+ pix->height = 0xffff;
+
+ ret = soc_mbus_bytes_per_line(pix->width, xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+ pix->bytesperline = ret;
+ pix->sizeimage = soc_mbus_image_size(xlate->host_fmt,
+ pix->bytesperline, pix->height);
+ /* Check against the CSIRXCNT limit */
+ if (pix->sizeimage > 4 * 0x3ffff) {
+ /* Adjust geometry, preserve aspect ratio */
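+ /*
+ * Keeping the aspect ratio, bytesperline scales like the
+ * height, so the limit bytesperline * height <= 4 * 0x3ffff
+ * gives new_height = sqrt(4 * 0x3ffff * height / bytesperline).
+ */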
+ unsigned int new_height = int_sqrt(div_u64(0x3ffffULL *
+ 4 * pix->height, pix->bytesperline));
+ pix->width = new_height * pix->width / pix->height;
+ pix->height = new_height;
+ ret = soc_mbus_bytes_per_line(pix->width,
+ xlate->host_fmt);
+ BUG_ON(ret < 0);
+ pix->bytesperline = ret;
+ pix->sizeimage = soc_mbus_image_size(xlate->host_fmt,
+ pix->bytesperline, pix->height);
+ }
+ }
+
+ /* limit to sensor capabilities */
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(icd->parent, "%s: sensor params: width = %d, height = %d\n",
+ __func__, pcdev->s_width, pcdev->s_height);
+
+ /* If the sensor does not support image size try PrP resizing */
+ pcdev->emma_prp = mx27_emma_prp_get_format(xlate->code,
+ xlate->host_fmt->fourcc);
+
+ memset(pcdev->resizing, 0, sizeof(pcdev->resizing));
+ if ((mf.width != pix->width || mf.height != pix->height) &&
+ pcdev->emma_prp->cfg.in_fmt == PRP_CNTL_DATA_IN_YUV422) {
+ if (mx2_emmaprp_resize(pcdev, &mf, pix, false) < 0)
+ dev_dbg(icd->parent, "%s: can't resize\n", __func__);
+ }
+
+ if (mf.field == V4L2_FIELD_ANY)
+ mf.field = V4L2_FIELD_NONE;
+ /*
+ * Driver supports interlaced images provided they have
+ * both fields so that they can be processed as if they
+ * were progressive.
+ */
+ if (mf.field != V4L2_FIELD_NONE && !V4L2_FIELD_HAS_BOTH(mf.field)) {
+ dev_err(icd->parent, "Field type %d unsupported.\n",
+ mf.field);
+ return -EINVAL;
+ }
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+
+ dev_dbg(icd->parent, "%s: returned params: width = %d, height = %d\n",
+ __func__, pix->width, pix->height);
+
+ return 0;
+}
+
+static int mx2_camera_querycap(struct soc_camera_host *ici,
+ struct v4l2_capability *cap)
+{
+ /* cap->name is set by the friendly caller:-> */
+ strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+static unsigned int mx2_camera_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ return vb2_poll(&icd->vb2_vidq, file, pt);
+}
+
+static struct soc_camera_host_ops mx2_soc_camera_host_ops = {
+ .owner = THIS_MODULE,
+ .add = mx2_camera_add_device,
+ .remove = mx2_camera_remove_device,
+ .set_fmt = mx2_camera_set_fmt,
+ .set_crop = mx2_camera_set_crop,
+ .get_formats = mx2_camera_get_formats,
+ .try_fmt = mx2_camera_try_fmt,
+ .init_videobuf2 = mx2_camera_init_videobuf,
+ .poll = mx2_camera_poll,
+ .querycap = mx2_camera_querycap,
+ .set_bus_param = mx2_camera_set_bus_param,
+};
+
+static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
+ int bufnum, bool err)
+{
+#ifdef DEBUG
+ struct mx2_fmt_cfg *prp = pcdev->emma_prp;
+#endif
+ struct mx2_buf_internal *ibuf;
+ struct mx2_buffer *buf;
+ struct vb2_buffer *vb;
+ unsigned long phys;
+
+ ibuf = list_first_entry(&pcdev->active_bufs, struct mx2_buf_internal,
+ queue);
+
+ BUG_ON(ibuf->bufnum != bufnum);
+
+ if (ibuf->discard) {
+ /*
+ * Discard buffer must not be returned to user space.
+ * Just return it to the discard queue.
+ */
+ list_move_tail(pcdev->active_bufs.next, &pcdev->discard);
+ } else {
+ buf = mx2_ibuf_to_buf(ibuf);
+
+ vb = &buf->vb;
+#ifdef DEBUG
+ phys = vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (prp->cfg.channel == 1) {
+ if (readl(pcdev->base_emma + PRP_DEST_RGB1_PTR +
+ 4 * bufnum) != phys) {
+ dev_err(pcdev->dev, "%lx != %x\n", phys,
+ readl(pcdev->base_emma +
+ PRP_DEST_RGB1_PTR + 4 * bufnum));
+ }
+ } else {
+ if (readl(pcdev->base_emma + PRP_DEST_Y_PTR -
+ 0x14 * bufnum) != phys) {
+ dev_err(pcdev->dev, "%lx != %x\n", phys,
+ readl(pcdev->base_emma +
+ PRP_DEST_Y_PTR - 0x14 * bufnum));
+ }
+ }
+#endif
+ dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%p %lu\n", __func__, vb,
+ vb2_plane_vaddr(vb, 0),
+ vb2_get_plane_payload(vb, 0));
+
+ list_del_init(&buf->internal.queue);
+ do_gettimeofday(&vb->v4l2_buf.timestamp);
+ vb->v4l2_buf.sequence = pcdev->frame_count;
+ if (err)
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ else
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ }
+
+ pcdev->frame_count++;
+
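+ /*
+ * If no buffer is queued, point the completed hardware slot at
+ * the discard buffer so the PrP keeps running; otherwise hand it
+ * the next queued buffer.
+ */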
+ if (list_empty(&pcdev->capture)) {
+ if (list_empty(&pcdev->discard)) {
+ dev_warn(pcdev->dev, "%s: trying to access empty discard list\n",
+ __func__);
+ return;
+ }
+
+ ibuf = list_first_entry(&pcdev->discard,
+ struct mx2_buf_internal, queue);
+ ibuf->bufnum = bufnum;
+
+ list_move_tail(pcdev->discard.next, &pcdev->active_bufs);
+ mx27_update_emma_buf(pcdev, pcdev->discard_buffer_dma, bufnum);
+ return;
+ }
+
+ buf = list_first_entry(&pcdev->capture, struct mx2_buffer,
+ internal.queue);
+
+ buf->internal.bufnum = bufnum;
+
+ list_move_tail(pcdev->capture.next, &pcdev->active_bufs);
+
+ vb = &buf->vb;
+ buf->state = MX2_STATE_ACTIVE;
+
+ phys = vb2_dma_contig_plane_dma_addr(vb, 0);
+ mx27_update_emma_buf(pcdev, phys, bufnum);
+}
+
+static irqreturn_t mx27_camera_emma_irq(int irq_emma, void *data)
+{
+ struct mx2_camera_dev *pcdev = data;
+ unsigned int status = readl(pcdev->base_emma + PRP_INTRSTATUS);
+ struct mx2_buf_internal *ibuf;
+
+ spin_lock(&pcdev->lock);
+
+ if (list_empty(&pcdev->active_bufs)) {
+ dev_warn(pcdev->dev, "%s: called while active list is empty\n",
+ __func__);
+
+ if (!status) {
+ spin_unlock(&pcdev->lock);
+ return IRQ_NONE;
+ }
+ }
+
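+ /*
+ * Status bits 3/4 and 5/6 are the per-buffer frame complete flags
+ * of channel 1 and channel 2 respectively, bit 7 is the line
+ * buffer overflow.
+ */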
+ if (status & (1 << 7)) { /* overflow */
+ u32 cntl = readl(pcdev->base_emma + PRP_CNTL);
+ writel(cntl & ~(PRP_CNTL_CH1EN | PRP_CNTL_CH2EN),
+ pcdev->base_emma + PRP_CNTL);
+ writel(cntl, pcdev->base_emma + PRP_CNTL);
+
+ ibuf = list_first_entry(&pcdev->active_bufs,
+ struct mx2_buf_internal, queue);
+ mx27_camera_frame_done_emma(pcdev,
+ ibuf->bufnum, true);
+
+ status &= ~(1 << 7);
+ } else if (((status & (3 << 5)) == (3 << 5)) ||
+ ((status & (3 << 3)) == (3 << 3))) {
+ /*
+ * Both buffers have triggered, process the one we're expecting
+ * to first
+ */
+ ibuf = list_first_entry(&pcdev->active_bufs,
+ struct mx2_buf_internal, queue);
+ mx27_camera_frame_done_emma(pcdev, ibuf->bufnum, false);
+ status &= ~(1 << (6 - ibuf->bufnum)); /* mark processed */
+ } else if ((status & (1 << 6)) || (status & (1 << 4))) {
+ mx27_camera_frame_done_emma(pcdev, 0, false);
+ } else if ((status & (1 << 5)) || (status & (1 << 3))) {
+ mx27_camera_frame_done_emma(pcdev, 1, false);
+ }
+
+ spin_unlock(&pcdev->lock);
+ writel(status, pcdev->base_emma + PRP_INTRSTATUS);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mx27_camera_emma_init(struct platform_device *pdev)
+{
+ struct mx2_camera_dev *pcdev = platform_get_drvdata(pdev);
+ struct resource *res_emma;
+ int irq_emma;
+ int err = 0;
+
+ res_emma = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ irq_emma = platform_get_irq(pdev, 1);
+ if (!res_emma || irq_emma < 0) {
+ dev_err(pcdev->dev, "no EMMA resources\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ pcdev->base_emma = devm_request_and_ioremap(pcdev->dev, res_emma);
+ if (!pcdev->base_emma) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ err = devm_request_irq(pcdev->dev, irq_emma, mx27_camera_emma_irq, 0,
+ MX2_CAM_DRV_NAME, pcdev);
+ if (err) {
+ dev_err(pcdev->dev, "Camera EMMA interrupt register failed \n");
+ goto out;
+ }
+
+ pcdev->clk_emma_ipg = devm_clk_get(pcdev->dev, "emma-ipg");
+ if (IS_ERR(pcdev->clk_emma_ipg)) {
+ err = PTR_ERR(pcdev->clk_emma_ipg);
+ goto out;
+ }
+
+ clk_prepare_enable(pcdev->clk_emma_ipg);
+
+ pcdev->clk_emma_ahb = devm_clk_get(pcdev->dev, "emma-ahb");
+ if (IS_ERR(pcdev->clk_emma_ahb)) {
+ err = PTR_ERR(pcdev->clk_emma_ahb);
+ goto exit_clk_emma_ipg;
+ }
+
+ clk_prepare_enable(pcdev->clk_emma_ahb);
+
+ err = mx27_camera_emma_prp_reset(pcdev);
+ if (err)
+ goto exit_clk_emma_ahb;
+
+ return err;
+
+exit_clk_emma_ahb:
+ clk_disable_unprepare(pcdev->clk_emma_ahb);
+exit_clk_emma_ipg:
+ clk_disable_unprepare(pcdev->clk_emma_ipg);
+out:
+ return err;
+}
+
+static int __devinit mx2_camera_probe(struct platform_device *pdev)
+{
+ struct mx2_camera_dev *pcdev;
+ struct resource *res_csi;
+ int irq_csi;
+ int err = 0;
+
+ dev_dbg(&pdev->dev, "initialising\n");
+
+ res_csi = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq_csi = platform_get_irq(pdev, 0);
+ if (res_csi == NULL || irq_csi < 0) {
+ dev_err(&pdev->dev, "Missing platform resources data\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
+ if (!pcdev) {
+ dev_err(&pdev->dev, "Could not allocate pcdev\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ pcdev->clk_csi = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(pcdev->clk_csi)) {
+ dev_err(&pdev->dev, "Could not get csi clock\n");
+ err = PTR_ERR(pcdev->clk_csi);
+ goto exit;
+ }
+
+ pcdev->pdata = pdev->dev.platform_data;
+ if (pcdev->pdata) {
+ long rate;
+
+ pcdev->platform_flags = pcdev->pdata->flags;
+
+ rate = clk_round_rate(pcdev->clk_csi, pcdev->pdata->clk * 2);
+ if (rate <= 0) {
+ err = -ENODEV;
+ goto exit;
+ }
+ err = clk_set_rate(pcdev->clk_csi, rate);
+ if (err < 0)
+ goto exit;
+ }
+
+ INIT_LIST_HEAD(&pcdev->capture);
+ INIT_LIST_HEAD(&pcdev->active_bufs);
+ INIT_LIST_HEAD(&pcdev->discard);
+ spin_lock_init(&pcdev->lock);
+
+ pcdev->base_csi = devm_request_and_ioremap(&pdev->dev, res_csi);
+ if (!pcdev->base_csi) {
+ err = -EADDRNOTAVAIL;
+ goto exit;
+ }
+
+ pcdev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pcdev);
+
+ if (cpu_is_mx25()) {
+ err = devm_request_irq(&pdev->dev, irq_csi, mx25_camera_irq, 0,
+ MX2_CAM_DRV_NAME, pcdev);
+ if (err) {
+ dev_err(pcdev->dev, "Camera interrupt register failed \n");
+ goto exit;
+ }
+ }
+
+ if (cpu_is_mx27()) {
+ err = mx27_camera_emma_init(pdev);
+ if (err)
+ goto exit;
+ }
+
+ /*
+ * We're done with drvdata here. Clear the pointer so that
+ * the v4l2 core can start using drvdata for its own purposes.
+ */
+ platform_set_drvdata(pdev, NULL);
+
+ pcdev->soc_host.drv_name = MX2_CAM_DRV_NAME;
+ pcdev->soc_host.ops = &mx2_soc_camera_host_ops;
+ pcdev->soc_host.priv = pcdev;
+ pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
+ pcdev->soc_host.nr = pdev->id;
+ if (cpu_is_mx25())
+ pcdev->soc_host.capabilities = SOCAM_HOST_CAP_STRIDE;
+
+ pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(pcdev->alloc_ctx)) {
+ err = PTR_ERR(pcdev->alloc_ctx);
+ goto eallocctx;
+ }
+ err = soc_camera_host_register(&pcdev->soc_host);
+ if (err)
+ goto exit_free_emma;
+
+ dev_info(&pdev->dev, "MX2 Camera (CSI) driver probed, clock frequency: %ld\n",
+ clk_get_rate(pcdev->clk_csi));
+
+ return 0;
+
+exit_free_emma:
+ vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+eallocctx:
+ if (cpu_is_mx27()) {
+ clk_disable_unprepare(pcdev->clk_emma_ipg);
+ clk_disable_unprepare(pcdev->clk_emma_ahb);
+ }
+exit:
+ return err;
+}
+
+static int __devexit mx2_camera_remove(struct platform_device *pdev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct mx2_camera_dev *pcdev = container_of(soc_host,
+ struct mx2_camera_dev, soc_host);
+
+ soc_camera_host_unregister(&pcdev->soc_host);
+
+ vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+
+ if (cpu_is_mx27()) {
+ clk_disable_unprepare(pcdev->clk_emma_ipg);
+ clk_disable_unprepare(pcdev->clk_emma_ahb);
+ }
+
+ dev_info(&pdev->dev, "MX2 Camera driver unloaded\n");
+
+ return 0;
+}
+
+static struct platform_driver mx2_camera_driver = {
+ .driver = {
+ .name = MX2_CAM_DRV_NAME,
+ },
+ .remove = __devexit_p(mx2_camera_remove),
+};
+
+static int __init mx2_camera_init(void)
+{
+ return platform_driver_probe(&mx2_camera_driver, &mx2_camera_probe);
+}
+
+static void __exit mx2_camera_exit(void)
+{
+ return platform_driver_unregister(&mx2_camera_driver);
+}
+
+module_init(mx2_camera_init);
+module_exit(mx2_camera_exit);
+
+MODULE_DESCRIPTION("i.MX27/i.MX25 SoC Camera Host driver");
+MODULE_AUTHOR("Sascha Hauer <sha@pengutronix.de>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MX2_CAM_VERSION);
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
new file mode 100644
index 0000000..3557ac9
--- /dev/null
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -0,0 +1,1290 @@
+/*
+ * V4L2 Driver for i.MX3x camera host
+ *
+ * Copyright (C) 2008
+ * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/videodev2.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+
+#include <mach/ipu.h>
+#include <linux/platform_data/camera-mx3.h>
+#include <linux/platform_data/dma-imx.h>
+
+#define MX3_CAM_DRV_NAME "mx3-camera"
+
+/* CMOS Sensor Interface Registers */
+#define CSI_REG_START 0x60
+
+#define CSI_SENS_CONF (0x60 - CSI_REG_START)
+#define CSI_SENS_FRM_SIZE (0x64 - CSI_REG_START)
+#define CSI_ACT_FRM_SIZE (0x68 - CSI_REG_START)
+#define CSI_OUT_FRM_CTRL (0x6C - CSI_REG_START)
+#define CSI_TST_CTRL (0x70 - CSI_REG_START)
+#define CSI_CCIR_CODE_1 (0x74 - CSI_REG_START)
+#define CSI_CCIR_CODE_2 (0x78 - CSI_REG_START)
+#define CSI_CCIR_CODE_3 (0x7C - CSI_REG_START)
+#define CSI_FLASH_STROBE_1 (0x80 - CSI_REG_START)
+#define CSI_FLASH_STROBE_2 (0x84 - CSI_REG_START)
+
+#define CSI_SENS_CONF_VSYNC_POL_SHIFT 0
+#define CSI_SENS_CONF_HSYNC_POL_SHIFT 1
+#define CSI_SENS_CONF_DATA_POL_SHIFT 2
+#define CSI_SENS_CONF_PIX_CLK_POL_SHIFT 3
+#define CSI_SENS_CONF_SENS_PRTCL_SHIFT 4
+#define CSI_SENS_CONF_SENS_CLKSRC_SHIFT 7
+#define CSI_SENS_CONF_DATA_FMT_SHIFT 8
+#define CSI_SENS_CONF_DATA_WIDTH_SHIFT 10
+#define CSI_SENS_CONF_EXT_VSYNC_SHIFT 15
+#define CSI_SENS_CONF_DIVRATIO_SHIFT 16
+
+#define CSI_SENS_CONF_DATA_FMT_RGB_YUV444 (0UL << CSI_SENS_CONF_DATA_FMT_SHIFT)
+#define CSI_SENS_CONF_DATA_FMT_YUV422 (2UL << CSI_SENS_CONF_DATA_FMT_SHIFT)
+#define CSI_SENS_CONF_DATA_FMT_BAYER (3UL << CSI_SENS_CONF_DATA_FMT_SHIFT)
+
+#define MAX_VIDEO_MEM 16
+
+struct mx3_camera_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_buffer vb;
+ struct list_head queue;
+
+ /* One descriptor per scatterlist (per frame) */
+ struct dma_async_tx_descriptor *txd;
+
+ /* We have to "build" a scatterlist ourselves - one element per frame */
+ struct scatterlist sg;
+};
+
+/**
+ * struct mx3_camera_dev - i.MX3x camera (CSI) object
+ * @icd: currently attached camera sensor
+ * @clk: pointer to clock
+ * @base: remapped register base address
+ * @pdata: platform data
+ * @platform_flags: platform flags
+ * @mclk: master clock frequency in Hz
+ * @capture: list of capture videobuffers
+ * @lock: protects video buffer lists
+ * @active: active video buffer
+ * @idmac_channel: array of pointers to IPU DMAC DMA channels
+ * @soc_host: embedded soc_host object
+ */
+struct mx3_camera_dev {
+ /*
+ * i.MX3x is only supposed to handle one camera on its Camera Sensor
+ * Interface. If anyone ever builds hardware to enable more than one
+ * camera _simultaneously_, they will have to modify this driver too
+ */
+ struct soc_camera_device *icd;
+ struct clk *clk;
+
+ void __iomem *base;
+
+ struct mx3_camera_pdata *pdata;
+
+ unsigned long platform_flags;
+ unsigned long mclk;
+ u16 width_flags; /* max 15 bits */
+
+ struct list_head capture;
+ spinlock_t lock; /* Protects video buffer lists */
+ struct mx3_camera_buffer *active;
+ size_t buf_total;
+ struct vb2_alloc_ctx *alloc_ctx;
+ enum v4l2_field field;
+ int sequence;
+
+ /* IDMAC / dmaengine interface */
+ struct idmac_channel *idmac_channel[1]; /* We need one channel */
+
+ struct soc_camera_host soc_host;
+};
+
+struct dma_chan_request {
+ struct mx3_camera_dev *mx3_cam;
+ enum ipu_channel id;
+};
+
+static u32 csi_reg_read(struct mx3_camera_dev *mx3, off_t reg)
+{
+ return __raw_readl(mx3->base + reg);
+}
+
+static void csi_reg_write(struct mx3_camera_dev *mx3, u32 value, off_t reg)
+{
+ __raw_writel(value, mx3->base + reg);
+}
+
+static struct mx3_camera_buffer *to_mx3_vb(struct vb2_buffer *vb)
+{
+ return container_of(vb, struct mx3_camera_buffer, vb);
+}
+
+/* Called from the IPU IDMAC ISR */
+static void mx3_cam_dma_done(void *arg)
+{
+ struct idmac_tx_desc *desc = to_tx_desc(arg);
+ struct dma_chan *chan = desc->txd.chan;
+ struct idmac_channel *ichannel = to_idmac_chan(chan);
+ struct mx3_camera_dev *mx3_cam = ichannel->client;
+
+ dev_dbg(chan->device->dev, "callback cookie %d, active DMA 0x%08x\n",
+ desc->txd.cookie, mx3_cam->active ? sg_dma_address(&mx3_cam->active->sg) : 0);
+
+ spin_lock(&mx3_cam->lock);
+ if (mx3_cam->active) {
+ struct vb2_buffer *vb = &mx3_cam->active->vb;
+ struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+
+ list_del_init(&buf->queue);
+ do_gettimeofday(&vb->v4l2_buf.timestamp);
+ vb->v4l2_buf.field = mx3_cam->field;
+ vb->v4l2_buf.sequence = mx3_cam->sequence++;
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ }
+
+ if (list_empty(&mx3_cam->capture)) {
+ mx3_cam->active = NULL;
+ spin_unlock(&mx3_cam->lock);
+
+ /*
+ * stop capture - without further buffers IPU_CHA_BUF0_RDY will
+ * not get updated
+ */
+ return;
+ }
+
+ mx3_cam->active = list_entry(mx3_cam->capture.next,
+ struct mx3_camera_buffer, queue);
+ spin_unlock(&mx3_cam->lock);
+}
+
+/*
+ * Videobuf operations
+ */
+
+/*
+ * Calculate the __buffer__ (not data) size and number of buffers.
+ */
+static int mx3_videobuf_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *count, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx3_camera_dev *mx3_cam = ici->priv;
+
+ if (!mx3_cam->idmac_channel[0])
+ return -EINVAL;
+
+ if (fmt) {
+ const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd,
+ fmt->fmt.pix.pixelformat);
+ unsigned int bytes_per_line;
+ int ret;
+
+ if (!xlate)
+ return -EINVAL;
+
+ ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
+ xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+
+ bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret);
+
+ ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line,
+ fmt->fmt.pix.height);
+ if (ret < 0)
+ return ret;
+
+ sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret);
+ } else {
+ /* Called from VIDIOC_REQBUFS or in compatibility mode */
+ sizes[0] = icd->sizeimage;
+ }
+
+ alloc_ctxs[0] = mx3_cam->alloc_ctx;
+
+ if (!vq->num_buffers)
+ mx3_cam->sequence = 0;
+
+ if (!*count)
+ *count = 2;
+
+ /* If *num_planes != 0, we have already verified *count. */
+ if (!*num_planes &&
+ sizes[0] * *count + mx3_cam->buf_total > MAX_VIDEO_MEM * 1024 * 1024)
+ *count = (MAX_VIDEO_MEM * 1024 * 1024 - mx3_cam->buf_total) /
+ sizes[0];
+
+ *num_planes = 1;
+
+ return 0;
+}
+
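+/*
+ * Map a V4L2 fourcc onto an IPU pixel format. Anything the IPU does not
+ * handle natively falls back to IPU_PIX_FMT_GENERIC, i.e. the data is
+ * treated as a plain stream of 8-bit samples; mx3_videobuf_queue() then
+ * programs the DMA geometry in bytes instead of pixels for such formats.
+ */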
+static enum pixel_fmt fourcc_to_ipu_pix(__u32 fourcc)
+{
+ /* Add more formats as need arises and test possibilities appear... */
+ switch (fourcc) {
+ case V4L2_PIX_FMT_RGB24:
+ return IPU_PIX_FMT_RGB24;
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_RGB565:
+ default:
+ return IPU_PIX_FMT_GENERIC;
+ }
+}
+
+static void mx3_videobuf_queue(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx3_camera_dev *mx3_cam = ici->priv;
+ struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+ struct scatterlist *sg = &buf->sg;
+ struct dma_async_tx_descriptor *txd;
+ struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
+ struct idmac_video_param *video = &ichan->params.video;
+ const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt;
+ unsigned long flags;
+ dma_cookie_t cookie;
+ size_t new_size;
+
+ new_size = icd->sizeimage;
+
+ if (vb2_plane_size(vb, 0) < new_size) {
+ dev_err(icd->parent, "Buffer #%d too small (%lu < %zu)\n",
+ vb->v4l2_buf.index, vb2_plane_size(vb, 0), new_size);
+ goto error;
+ }
+
+ if (!buf->txd) {
+ sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0);
+ sg_dma_len(sg) = new_size;
+
+ txd = dmaengine_prep_slave_sg(
+ &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!txd)
+ goto error;
+
+ txd->callback_param = txd;
+ txd->callback = mx3_cam_dma_done;
+
+ buf->txd = txd;
+ } else {
+ txd = buf->txd;
+ }
+
+ vb2_set_plane_payload(vb, 0, new_size);
+
+ /* This is the configuration of one sg-element */
+ video->out_pixel_fmt = fourcc_to_ipu_pix(host_fmt->fourcc);
+
+ if (video->out_pixel_fmt == IPU_PIX_FMT_GENERIC) {
+ /*
+ * If the IPU DMA channel is configured to transfer generic
+ * 8-bit data, we have to set up the geometry parameters
+ * correctly, according to the current pixel format. The DMA
+ * horizontal parameters in this case are expressed in bytes,
+ * not in pixels.
+ */
+ video->out_width = icd->bytesperline;
+ video->out_height = icd->user_height;
+ video->out_stride = icd->bytesperline;
+ } else {
+ /*
+ * For IPU known formats the pixel unit will be managed
+ * successfully by the IPU code
+ */
+ video->out_width = icd->user_width;
+ video->out_height = icd->user_height;
+ video->out_stride = icd->user_width;
+ }
+
+#ifdef DEBUG
+ /* helps to see what DMA actually has written */
+ if (vb2_plane_vaddr(vb, 0))
+ memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
+#endif
+
+ spin_lock_irqsave(&mx3_cam->lock, flags);
+ list_add_tail(&buf->queue, &mx3_cam->capture);
+
+ if (!mx3_cam->active)
+ mx3_cam->active = buf;
+
+ spin_unlock_irqrestore(&mx3_cam->lock, flags);
+
+ cookie = txd->tx_submit(txd);
+ dev_dbg(icd->parent, "Submitted cookie %d DMA 0x%08x\n",
+ cookie, sg_dma_address(&buf->sg));
+
+ if (cookie >= 0)
+ return;
+
+ spin_lock_irqsave(&mx3_cam->lock, flags);
+
+ /* Submit error */
+ list_del_init(&buf->queue);
+
+ if (mx3_cam->active == buf)
+ mx3_cam->active = NULL;
+
+ spin_unlock_irqrestore(&mx3_cam->lock, flags);
+error:
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+}
+
+static void mx3_videobuf_release(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx3_camera_dev *mx3_cam = ici->priv;
+ struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+ struct dma_async_tx_descriptor *txd = buf->txd;
+ unsigned long flags;
+
+ dev_dbg(icd->parent,
+ "Release%s DMA 0x%08x, queue %sempty\n",
+ mx3_cam->active == buf ? " active" : "", sg_dma_address(&buf->sg),
+ list_empty(&buf->queue) ? "" : "not ");
+
+ spin_lock_irqsave(&mx3_cam->lock, flags);
+
+ if (mx3_cam->active == buf)
+ mx3_cam->active = NULL;
+
+ /* Doesn't hurt even if the list is empty */
+ list_del_init(&buf->queue);
+
+ if (txd) {
+ buf->txd = NULL;
+ if (mx3_cam->idmac_channel[0])
+ async_tx_ack(txd);
+ }
+
+ spin_unlock_irqrestore(&mx3_cam->lock, flags);
+
+ mx3_cam->buf_total -= vb2_plane_size(vb, 0);
+}
+
+static int mx3_videobuf_init(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx3_camera_dev *mx3_cam = ici->priv;
+ struct mx3_camera_buffer *buf = to_mx3_vb(vb);
+
+ if (!buf->txd) {
+ /* This is for locking debugging only */
+ INIT_LIST_HEAD(&buf->queue);
+ sg_init_table(&buf->sg, 1);
+
+ mx3_cam->buf_total += vb2_plane_size(vb, 0);
+ }
+
+ return 0;
+}
+
+static int mx3_stop_streaming(struct vb2_queue *q)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(q);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx3_camera_dev *mx3_cam = ici->priv;
+ struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
+ struct mx3_camera_buffer *buf, *tmp;
+ unsigned long flags;
+
+ if (ichan) {
+ struct dma_chan *chan = &ichan->dma_chan;
+ chan->device->device_control(chan, DMA_PAUSE, 0);
+ }
+
+ spin_lock_irqsave(&mx3_cam->lock, flags);
+
+ mx3_cam->active = NULL;
+
+ list_for_each_entry_safe(buf, tmp, &mx3_cam->capture, queue) {
+ list_del_init(&buf->queue);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ spin_unlock_irqrestore(&mx3_cam->lock, flags);
+
+ return 0;
+}
+
+static struct vb2_ops mx3_videobuf_ops = {
+ .queue_setup = mx3_videobuf_setup,
+ .buf_queue = mx3_videobuf_queue,
+ .buf_cleanup = mx3_videobuf_release,
+ .buf_init = mx3_videobuf_init,
+ .wait_prepare = soc_camera_unlock,
+ .wait_finish = soc_camera_lock,
+ .stop_streaming = mx3_stop_streaming,
+};
+
+static int mx3_camera_init_videobuf(struct vb2_queue *q,
+ struct soc_camera_device *icd)
+{
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = icd;
+ q->ops = &mx3_videobuf_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct mx3_camera_buffer);
+
+ return vb2_queue_init(q);
+}
+
+/* First part of ipu_csi_init_interface() */
+static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
+ struct soc_camera_device *icd)
+{
+ u32 conf;
+ long rate;
+
+ /* Set default size: ipu_csi_set_window_size() */
+ csi_reg_write(mx3_cam, (640 - 1) | ((480 - 1) << 16), CSI_ACT_FRM_SIZE);
+ /* ...and position to 0:0: ipu_csi_set_window_pos() */
+ conf = csi_reg_read(mx3_cam, CSI_OUT_FRM_CTRL) & 0xffff0000;
+ csi_reg_write(mx3_cam, conf, CSI_OUT_FRM_CTRL);
+
+ /* We use only gated clock synchronisation mode so far */
+ conf = 0 << CSI_SENS_CONF_SENS_PRTCL_SHIFT;
+
+ /* Set generic data, platform-biggest bus-width */
+ conf |= CSI_SENS_CONF_DATA_FMT_BAYER;
+
+ if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15)
+ conf |= 3 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+ else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10)
+ conf |= 2 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+ else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8)
+ conf |= 1 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+ else /* if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4) */
+ conf |= 0 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+
+ if (mx3_cam->platform_flags & MX3_CAMERA_CLK_SRC)
+ conf |= 1 << CSI_SENS_CONF_SENS_CLKSRC_SHIFT;
+ if (mx3_cam->platform_flags & MX3_CAMERA_EXT_VSYNC)
+ conf |= 1 << CSI_SENS_CONF_EXT_VSYNC_SHIFT;
+ if (mx3_cam->platform_flags & MX3_CAMERA_DP)
+ conf |= 1 << CSI_SENS_CONF_DATA_POL_SHIFT;
+ if (mx3_cam->platform_flags & MX3_CAMERA_PCP)
+ conf |= 1 << CSI_SENS_CONF_PIX_CLK_POL_SHIFT;
+ if (mx3_cam->platform_flags & MX3_CAMERA_HSP)
+ conf |= 1 << CSI_SENS_CONF_HSYNC_POL_SHIFT;
+ if (mx3_cam->platform_flags & MX3_CAMERA_VSP)
+ conf |= 1 << CSI_SENS_CONF_VSYNC_POL_SHIFT;
+
+ /* ipu_csi_init_interface() */
+ csi_reg_write(mx3_cam, conf, CSI_SENS_CONF);
+
+ clk_prepare_enable(mx3_cam->clk);
+ rate = clk_round_rate(mx3_cam->clk, mx3_cam->mclk);
+ dev_dbg(icd->parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate);
+ if (rate)
+ clk_set_rate(mx3_cam->clk, rate);
+}
+
+/* Called with .video_lock held */
+static int mx3_camera_add_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx3_camera_dev *mx3_cam = ici->priv;
+
+ if (mx3_cam->icd)
+ return -EBUSY;
+
+ mx3_camera_activate(mx3_cam, icd);
+
+ mx3_cam->buf_total = 0;
+ mx3_cam->icd = icd;
+
+ dev_info(icd->parent, "MX3 Camera driver attached to camera %d\n",
+ icd->devnum);
+
+ return 0;
+}
+
+/* Called with .video_lock held */
+static void mx3_camera_remove_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx3_camera_dev *mx3_cam = ici->priv;
+ struct idmac_channel **ichan = &mx3_cam->idmac_channel[0];
+
+ BUG_ON(icd != mx3_cam->icd);
+
+ if (*ichan) {
+ dma_release_channel(&(*ichan)->dma_chan);
+ *ichan = NULL;
+ }
+
+ clk_disable_unprepare(mx3_cam->clk);
+
+ mx3_cam->icd = NULL;
+
+ dev_info(icd->parent, "MX3 Camera driver detached from camera %d\n",
+ icd->devnum);
+}
+
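+/*
+ * Check the requested bus width against the data widths the platform has
+ * advertised. width_flags has bit (n - 1) set for each supported n-bit
+ * width (see the probe routine below), so e.g. with 8- and 10-bit support
+ * (bits 7 and 9 set) fls() returns 10 and any request up to 10 bits is
+ * accepted, while a 15-bit request is rejected.
+ */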
+static int test_platform_param(struct mx3_camera_dev *mx3_cam,
+ unsigned char buswidth, unsigned long *flags)
+{
+ /*
+ * If requested data width is supported by the platform, use it or any
+ * possible lower value - i.MX31 is smart enough to shift bits
+ */
+ if (buswidth > fls(mx3_cam->width_flags))
+ return -EINVAL;
+
+ /*
+ * Platform specified synchronization and pixel clock polarities are
+ * only a recommendation and are only used during probing. MX3x
+ * camera interface only works in master mode, i.e., uses HSYNC and
+ * VSYNC signals from the sensor
+ */
+ *flags = V4L2_MBUS_MASTER |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_LOW |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_LOW |
+ V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING |
+ V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_LOW;
+
+ return 0;
+}
+
+static int mx3_camera_try_bus_param(struct soc_camera_device *icd,
+ const unsigned int depth)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx3_camera_dev *mx3_cam = ici->priv;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long bus_flags, common_flags;
+ int ret = test_platform_param(mx3_cam, depth, &bus_flags);
+
+ dev_dbg(icd->parent, "request bus width %d bit: %d\n", depth, ret);
+
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ bus_flags);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%lx\n",
+ cfg.flags, bus_flags);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ }
+
+ return 0;
+}
+
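+/*
+ * Filter callback for dma_request_channel(): accept only IPU IDMAC channels
+ * whose channel id and parent device match the request, so that the host
+ * ends up with IDMAC_IC_7 belonging to its own IPU instance.
+ */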
+static bool chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct dma_chan_request *rq = arg;
+ struct mx3_camera_pdata *pdata;
+
+ if (!imx_dma_is_ipu(chan))
+ return false;
+
+ if (!rq)
+ return false;
+
+ pdata = rq->mx3_cam->soc_host.v4l2_dev.dev->platform_data;
+
+ return rq->id == chan->chan_id &&
+ pdata->dma_dev == chan->device->dev;
+}
+
+static const struct soc_mbus_pixelfmt mx3_camera_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .name = "Bayer BGGR (sRGB) 8 bit",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ }, {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .name = "Monochrome 8 bit",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+};
+
+/* This will be corrected as we get more formats */
+static bool mx3_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+{
+ return fmt->packing == SOC_MBUS_PACKING_NONE ||
+ (fmt->bits_per_sample == 8 &&
+ fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+ (fmt->bits_per_sample > 8 &&
+ fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+}
+
+static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int idx,
+ struct soc_camera_format_xlate *xlate)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->parent;
+ int formats = 0, ret;
+ enum v4l2_mbus_pixelcode code;
+ const struct soc_mbus_pixelfmt *fmt;
+
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_warn(icd->parent,
+ "Unsupported format code #%u: %d\n", idx, code);
+ return 0;
+ }
+
+ /* This also checks support for the requested bits-per-sample */
+ ret = mx3_camera_try_bus_param(icd, fmt->bits_per_sample);
+ if (ret < 0)
+ return 0;
+
+ switch (code) {
+ case V4L2_MBUS_FMT_SBGGR10_1X10:
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = &mx3_camera_formats[0];
+ xlate->code = code;
+ xlate++;
+ dev_dbg(dev, "Providing format %s using code %d\n",
+ mx3_camera_formats[0].name, code);
+ }
+ break;
+ case V4L2_MBUS_FMT_Y10_1X10:
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = &mx3_camera_formats[1];
+ xlate->code = code;
+ xlate++;
+ dev_dbg(dev, "Providing format %s using code %d\n",
+ mx3_camera_formats[1].name, code);
+ }
+ break;
+ default:
+ if (!mx3_camera_packing_supported(fmt))
+ return 0;
+ }
+
+ /* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ dev_dbg(dev, "Providing format %c%c%c%c in pass-through mode\n",
+ (fmt->fourcc >> (0*8)) & 0xFF,
+ (fmt->fourcc >> (1*8)) & 0xFF,
+ (fmt->fourcc >> (2*8)) & 0xFF,
+ (fmt->fourcc >> (3*8)) & 0xFF);
+ xlate++;
+ }
+
+ return formats;
+}
+
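+/*
+ * Program the CSI frame geometry. For IPU-generic (Bayer) transfers the
+ * horizontal size is expressed in samples rather than pixels: e.g. for a
+ * packing that carries two 8-bit samples per pixel a 640-pixel line is
+ * programmed as 1280 samples (assuming soc_mbus_samples_per_pixel()
+ * reports 2 samples per pixel for that packing).
+ */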
+static void configure_geometry(struct mx3_camera_dev *mx3_cam,
+ unsigned int width, unsigned int height,
+ const struct soc_mbus_pixelfmt *fmt)
+{
+ u32 ctrl, width_field, height_field;
+
+ if (fourcc_to_ipu_pix(fmt->fourcc) == IPU_PIX_FMT_GENERIC) {
+ /*
+ * As the CSI will be configured to output BAYER, the width
+ * parameter here counts the number of samples needed to capture
+ * the whole image width.
+ */
+ unsigned int num, den;
+ int ret = soc_mbus_samples_per_pixel(fmt, &num, &den);
+ BUG_ON(ret < 0);
+ width = width * num / den;
+ }
+
+ /* Setup frame size - this cannot be changed on-the-fly... */
+ width_field = width - 1;
+ height_field = height - 1;
+ csi_reg_write(mx3_cam, width_field | (height_field << 16), CSI_SENS_FRM_SIZE);
+
+ csi_reg_write(mx3_cam, width_field << 16, CSI_FLASH_STROBE_1);
+ csi_reg_write(mx3_cam, (height_field << 16) | 0x22, CSI_FLASH_STROBE_2);
+
+ csi_reg_write(mx3_cam, width_field | (height_field << 16), CSI_ACT_FRM_SIZE);
+
+ /* ...and position */
+ ctrl = csi_reg_read(mx3_cam, CSI_OUT_FRM_CTRL) & 0xffff0000;
+ /* Sensor does the cropping */
+ csi_reg_write(mx3_cam, ctrl | 0 | (0 << 8), CSI_OUT_FRM_CTRL);
+}
+
+static int acquire_dma_channel(struct mx3_camera_dev *mx3_cam)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ struct idmac_channel **ichan = &mx3_cam->idmac_channel[0];
+ /* We have to use IDMAC_IC_7 for Bayer / generic data */
+ struct dma_chan_request rq = {.mx3_cam = mx3_cam,
+ .id = IDMAC_IC_7};
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_PRIVATE, mask);
+ chan = dma_request_channel(mask, chan_filter, &rq);
+ if (!chan)
+ return -EBUSY;
+
+ *ichan = to_idmac_chan(chan);
+ (*ichan)->client = mx3_cam;
+
+ return 0;
+}
+
+/*
+ * FIXME: learn to use stride != width, then we can keep stride properly aligned
+ * and support arbitrary (even) widths.
+ */
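+/*
+ * Examples: a width of 644 pixels is rounded up to 648, while 4090 would
+ * round up to 4096 and is therefore truncated down to 4088 instead.
+ */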
+static inline void stride_align(__u32 *width)
+{
+ if (ALIGN(*width, 8) < 4096)
+ *width = ALIGN(*width, 8);
+ else
+ *width = *width & ~7;
+}
+
+/*
+ * As long as we don't implement host-side cropping and scaling, we can use
+ * default g_crop and cropcap from soc_camera.c
+ */
+static int mx3_camera_set_crop(struct soc_camera_device *icd,
+ struct v4l2_crop *a)
+{
+ struct v4l2_rect *rect = &a->c;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx3_camera_dev *mx3_cam = ici->priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ soc_camera_limit_side(&rect->left, &rect->width, 0, 2, 4096);
+ soc_camera_limit_side(&rect->top, &rect->height, 0, 2, 4096);
+
+ ret = v4l2_subdev_call(sd, video, s_crop, a);
+ if (ret < 0)
+ return ret;
+
+ /* The capture device might have changed its output sizes */
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ if (mf.code != icd->current_fmt->code)
+ return -EINVAL;
+
+ if (mf.width & 7) {
+ /* Ouch! We can only handle 8-byte aligned width... */
+ stride_align(&mf.width);
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (mf.width != icd->user_width || mf.height != icd->user_height)
+ configure_geometry(mx3_cam, mf.width, mf.height,
+ icd->current_fmt->host_fmt);
+
+ dev_dbg(icd->parent, "Sensor cropped %dx%d\n",
+ mf.width, mf.height);
+
+ icd->user_width = mf.width;
+ icd->user_height = mf.height;
+
+ return ret;
+}
+
+static int mx3_camera_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx3_camera_dev *mx3_cam = ici->priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(icd->parent, "Format %x not found\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ stride_align(&pix->width);
+ dev_dbg(icd->parent, "Set format %dx%d\n", pix->width, pix->height);
+
+ /*
+ * Might have to perform a complete interface initialisation like in
+ * ipu_csi_init_interface() in mxc_v4l2_s_param(). Also consider
+ * mxc_v4l2_s_fmt()
+ */
+
+ configure_geometry(mx3_cam, pix->width, pix->height, xlate->host_fmt);
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ if (mf.code != xlate->code)
+ return -EINVAL;
+
+ if (!mx3_cam->idmac_channel[0]) {
+ ret = acquire_dma_channel(mx3_cam);
+ if (ret < 0)
+ return ret;
+ }
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ mx3_cam->field = mf.field;
+ pix->colorspace = mf.colorspace;
+ icd->current_fmt = xlate;
+
+ dev_dbg(icd->parent, "Sensor set %dx%d\n", pix->width, pix->height);
+
+ return ret;
+}
+
+static int mx3_camera_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ __u32 pixfmt = pix->pixelformat;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (pixfmt && !xlate) {
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
+ return -EINVAL;
+ }
+
+ /* limit to MX3 hardware capabilities */
+ if (pix->height > 4096)
+ pix->height = 4096;
+ if (pix->width > 4096)
+ pix->width = 4096;
+
+ /* limit to sensor capabilities */
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->colorspace = mf.colorspace;
+
+ switch (mf.field) {
+ case V4L2_FIELD_ANY:
+ pix->field = V4L2_FIELD_NONE;
+ break;
+ case V4L2_FIELD_NONE:
+ break;
+ default:
+ dev_err(icd->parent, "Field type %d unsupported.\n",
+ mf.field);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int mx3_camera_reqbufs(struct soc_camera_device *icd,
+ struct v4l2_requestbuffers *p)
+{
+ return 0;
+}
+
+static unsigned int mx3_camera_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ return vb2_poll(&icd->vb2_vidq, file, pt);
+}
+
+static int mx3_camera_querycap(struct soc_camera_host *ici,
+ struct v4l2_capability *cap)
+{
+ /* cap->name is set by the friendly caller :-> */
+ strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+static int mx3_camera_set_bus_param(struct soc_camera_device *icd)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx3_camera_dev *mx3_cam = ici->priv;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ u32 pixfmt = icd->current_fmt->host_fmt->fourcc;
+ unsigned long bus_flags, common_flags;
+ u32 dw, sens_conf;
+ const struct soc_mbus_pixelfmt *fmt;
+ int buswidth;
+ int ret;
+ const struct soc_camera_format_xlate *xlate;
+ struct device *dev = icd->parent;
+
+ fmt = soc_mbus_get_fmtdesc(icd->current_fmt->code);
+ if (!fmt)
+ return -EINVAL;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (!xlate) {
+ dev_warn(dev, "Format %x not found\n", pixfmt);
+ return -EINVAL;
+ }
+
+ buswidth = fmt->bits_per_sample;
+ ret = test_platform_param(mx3_cam, buswidth, &bus_flags);
+
+ dev_dbg(dev, "requested bus width %d bit: %d\n", buswidth, ret);
+
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ bus_flags);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%lx\n",
+ cfg.flags, bus_flags);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = bus_flags;
+ }
+
+ dev_dbg(dev, "Flags cam: 0x%x host: 0x%lx common: 0x%lx\n",
+ cfg.flags, bus_flags, common_flags);
+
+ /* Make choices, based on platform preferences */
+ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+ if (mx3_cam->platform_flags & MX3_CAMERA_HSP)
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+ if (mx3_cam->platform_flags & MX3_CAMERA_VSP)
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & V4L2_MBUS_DATA_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_DATA_ACTIVE_LOW)) {
+ if (mx3_cam->platform_flags & MX3_CAMERA_DP)
+ common_flags &= ~V4L2_MBUS_DATA_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_DATA_ACTIVE_LOW;
+ }
+
+ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+ (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+ if (mx3_cam->platform_flags & MX3_CAMERA_PCP)
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+ else
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ }
+
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(dev, "camera s_mbus_config(0x%lx) returned %d\n",
+ common_flags, ret);
+ return ret;
+ }
+
+ /*
+ * So far only gated clock mode is supported. Add a line
+ * (3 << CSI_SENS_CONF_SENS_PRTCL_SHIFT) |
+ * below and select the required mode when supporting other
+ * synchronisation protocols.
+ */
+ sens_conf = csi_reg_read(mx3_cam, CSI_SENS_CONF) &
+ ~((1 << CSI_SENS_CONF_VSYNC_POL_SHIFT) |
+ (1 << CSI_SENS_CONF_HSYNC_POL_SHIFT) |
+ (1 << CSI_SENS_CONF_DATA_POL_SHIFT) |
+ (1 << CSI_SENS_CONF_PIX_CLK_POL_SHIFT) |
+ (3 << CSI_SENS_CONF_DATA_FMT_SHIFT) |
+ (3 << CSI_SENS_CONF_DATA_WIDTH_SHIFT));
+
+ /* TODO: Support RGB and YUV formats */
+
+ /* This has been set in mx3_camera_activate(), but we clear it above */
+ sens_conf |= CSI_SENS_CONF_DATA_FMT_BAYER;
+
+ if (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ sens_conf |= 1 << CSI_SENS_CONF_PIX_CLK_POL_SHIFT;
+ if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ sens_conf |= 1 << CSI_SENS_CONF_HSYNC_POL_SHIFT;
+ if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ sens_conf |= 1 << CSI_SENS_CONF_VSYNC_POL_SHIFT;
+ if (common_flags & V4L2_MBUS_DATA_ACTIVE_LOW)
+ sens_conf |= 1 << CSI_SENS_CONF_DATA_POL_SHIFT;
+
+ /* Just do what we're asked to do */
+ switch (xlate->host_fmt->bits_per_sample) {
+ case 4:
+ dw = 0 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+ break;
+ case 8:
+ dw = 1 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+ break;
+ case 10:
+ dw = 2 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+ break;
+ default:
+ /*
+ * Actually it can only be 15 now, default is just to silence
+ * compiler warnings
+ */
+ case 15:
+ dw = 3 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
+ }
+
+ csi_reg_write(mx3_cam, sens_conf | dw, CSI_SENS_CONF);
+
+ dev_dbg(dev, "Set SENS_CONF to %x\n", sens_conf | dw);
+
+ return 0;
+}
+
+static struct soc_camera_host_ops mx3_soc_camera_host_ops = {
+ .owner = THIS_MODULE,
+ .add = mx3_camera_add_device,
+ .remove = mx3_camera_remove_device,
+ .set_crop = mx3_camera_set_crop,
+ .set_fmt = mx3_camera_set_fmt,
+ .try_fmt = mx3_camera_try_fmt,
+ .get_formats = mx3_camera_get_formats,
+ .init_videobuf2 = mx3_camera_init_videobuf,
+ .reqbufs = mx3_camera_reqbufs,
+ .poll = mx3_camera_poll,
+ .querycap = mx3_camera_querycap,
+ .set_bus_param = mx3_camera_set_bus_param,
+};
+
+static int __devinit mx3_camera_probe(struct platform_device *pdev)
+{
+ struct mx3_camera_dev *mx3_cam;
+ struct resource *res;
+ void __iomem *base;
+ int err = 0;
+ struct soc_camera_host *soc_host;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENODEV;
+ goto egetres;
+ }
+
+ mx3_cam = vzalloc(sizeof(*mx3_cam));
+ if (!mx3_cam) {
+ dev_err(&pdev->dev, "Could not allocate mx3 camera object\n");
+ err = -ENOMEM;
+ goto ealloc;
+ }
+
+ mx3_cam->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mx3_cam->clk)) {
+ err = PTR_ERR(mx3_cam->clk);
+ goto eclkget;
+ }
+
+ mx3_cam->pdata = pdev->dev.platform_data;
+ mx3_cam->platform_flags = mx3_cam->pdata->flags;
+ if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_MASK)) {
+ /*
+ * Platform hasn't set available data widths. This is bad.
+ * Warn and use a default.
+ */
+ dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
+ "data widths, using default 8 bit\n");
+ mx3_cam->platform_flags |= MX3_CAMERA_DATAWIDTH_8;
+ }
+ if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4)
+ mx3_cam->width_flags = 1 << 3;
+ if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8)
+ mx3_cam->width_flags |= 1 << 7;
+ if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10)
+ mx3_cam->width_flags |= 1 << 9;
+ if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15)
+ mx3_cam->width_flags |= 1 << 14;
+
+ mx3_cam->mclk = mx3_cam->pdata->mclk_10khz * 10000;
+ if (!mx3_cam->mclk) {
+ dev_warn(&pdev->dev,
+ "mclk_10khz == 0! Please, fix your platform data. "
+ "Using default 20MHz\n");
+ mx3_cam->mclk = 20000000;
+ }
+
+ /* list of video-buffers */
+ INIT_LIST_HEAD(&mx3_cam->capture);
+ spin_lock_init(&mx3_cam->lock);
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ pr_err("Couldn't map %x@%x\n", resource_size(res), res->start);
+ err = -ENOMEM;
+ goto eioremap;
+ }
+
+ mx3_cam->base = base;
+
+ soc_host = &mx3_cam->soc_host;
+ soc_host->drv_name = MX3_CAM_DRV_NAME;
+ soc_host->ops = &mx3_soc_camera_host_ops;
+ soc_host->priv = mx3_cam;
+ soc_host->v4l2_dev.dev = &pdev->dev;
+ soc_host->nr = pdev->id;
+
+ mx3_cam->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(mx3_cam->alloc_ctx)) {
+ err = PTR_ERR(mx3_cam->alloc_ctx);
+ goto eallocctx;
+ }
+
+ err = soc_camera_host_register(soc_host);
+ if (err)
+ goto ecamhostreg;
+
+ /* IDMAC interface */
+ dmaengine_get();
+
+ return 0;
+
+ecamhostreg:
+ vb2_dma_contig_cleanup_ctx(mx3_cam->alloc_ctx);
+eallocctx:
+ iounmap(base);
+eioremap:
+ clk_put(mx3_cam->clk);
+eclkget:
+ vfree(mx3_cam);
+ealloc:
+egetres:
+ return err;
+}
+
+static int __devexit mx3_camera_remove(struct platform_device *pdev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct mx3_camera_dev *mx3_cam = container_of(soc_host,
+ struct mx3_camera_dev, soc_host);
+
+ clk_put(mx3_cam->clk);
+
+ soc_camera_host_unregister(soc_host);
+
+ iounmap(mx3_cam->base);
+
+ /*
+ * The channel has either not been allocated,
+ * or should have been released
+ */
+ if (WARN_ON(mx3_cam->idmac_channel[0]))
+ dma_release_channel(&mx3_cam->idmac_channel[0]->dma_chan);
+
+ vb2_dma_contig_cleanup_ctx(mx3_cam->alloc_ctx);
+
+ vfree(mx3_cam);
+
+ dmaengine_put();
+
+ return 0;
+}
+
+static struct platform_driver mx3_camera_driver = {
+ .driver = {
+ .name = MX3_CAM_DRV_NAME,
+ },
+ .probe = mx3_camera_probe,
+ .remove = __devexit_p(mx3_camera_remove),
+};
+
+module_platform_driver(mx3_camera_driver);
+
+MODULE_DESCRIPTION("i.MX3x SoC Camera Host driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.2.3");
+MODULE_ALIAS("platform:" MX3_CAM_DRV_NAME);
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
new file mode 100644
index 0000000..fa08c76
--- /dev/null
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -0,0 +1,1723 @@
+/*
+ * V4L2 SoC Camera driver for OMAP1 Camera Interface
+ *
+ * Copyright (C) 2010, Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ *
+ * Based on V4L2 Driver for i.MXL/i.MXL camera (CSI) host
+ * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Copyright (C) 2009, Darius Augulis <augulis.darius@gmail.com>
+ *
+ * Based on PXA SoC camera driver
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * Hardware specific bits initially based on former work by Matt Callow
+ * drivers/media/platform/omap/omap1510cam.c
+ * Copyright (C) 2006 Matt Callow
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/omap1_camera.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/videobuf-dma-contig.h>
+#include <media/videobuf-dma-sg.h>
+
+#include <plat/dma.h>
+
+
+#define DRIVER_NAME "omap1-camera"
+#define DRIVER_VERSION "0.0.2"
+
+
+/*
+ * ---------------------------------------------------------------------------
+ * OMAP1 Camera Interface registers
+ * ---------------------------------------------------------------------------
+ */
+
+#define REG_CTRLCLOCK 0x00
+#define REG_IT_STATUS 0x04
+#define REG_MODE 0x08
+#define REG_STATUS 0x0C
+#define REG_CAMDATA 0x10
+#define REG_GPIO 0x14
+#define REG_PEAK_COUNTER 0x18
+
+/* CTRLCLOCK bit shifts */
+#define LCLK_EN BIT(7)
+#define DPLL_EN BIT(6)
+#define MCLK_EN BIT(5)
+#define CAMEXCLK_EN BIT(4)
+#define POLCLK BIT(3)
+#define FOSCMOD_SHIFT 0
+#define FOSCMOD_MASK (0x7 << FOSCMOD_SHIFT)
+#define FOSCMOD_12MHz 0x0
+#define FOSCMOD_6MHz 0x2
+#define FOSCMOD_9_6MHz 0x4
+#define FOSCMOD_24MHz 0x5
+#define FOSCMOD_8MHz 0x6
+
+/* IT_STATUS bit shifts */
+#define DATA_TRANSFER BIT(5)
+#define FIFO_FULL BIT(4)
+#define H_DOWN BIT(3)
+#define H_UP BIT(2)
+#define V_DOWN BIT(1)
+#define V_UP BIT(0)
+
+/* MODE bit shifts */
+#define RAZ_FIFO BIT(18)
+#define EN_FIFO_FULL BIT(17)
+#define EN_NIRQ BIT(16)
+#define THRESHOLD_SHIFT 9
+#define THRESHOLD_MASK (0x7f << THRESHOLD_SHIFT)
+#define DMA BIT(8)
+#define EN_H_DOWN BIT(7)
+#define EN_H_UP BIT(6)
+#define EN_V_DOWN BIT(5)
+#define EN_V_UP BIT(4)
+#define ORDERCAMD BIT(3)
+
+#define IRQ_MASK (EN_V_UP | EN_V_DOWN | EN_H_UP | EN_H_DOWN | \
+ EN_NIRQ | EN_FIFO_FULL)
+
+/* STATUS bit shifts */
+#define HSTATUS BIT(1)
+#define VSTATUS BIT(0)
+
+/* GPIO bit shifts */
+#define CAM_RST BIT(0)
+
+/* end of OMAP1 Camera Interface registers */
+
+
+#define SOCAM_BUS_FLAGS (V4L2_MBUS_MASTER | \
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING | \
+ V4L2_MBUS_DATA_ACTIVE_HIGH)
+
+
+#define FIFO_SIZE ((THRESHOLD_MASK >> THRESHOLD_SHIFT) + 1)
+#define FIFO_SHIFT __fls(FIFO_SIZE)
+
+#define DMA_BURST_SHIFT (1 + OMAP_DMA_DATA_BURST_4)
+#define DMA_BURST_SIZE (1 << DMA_BURST_SHIFT)
+
+#define DMA_ELEMENT_SHIFT OMAP_DMA_DATA_TYPE_S32
+#define DMA_ELEMENT_SIZE (1 << DMA_ELEMENT_SHIFT)
+
+#define DMA_FRAME_SHIFT_CONTIG (FIFO_SHIFT - 1)
+#define DMA_FRAME_SHIFT_SG DMA_BURST_SHIFT
+
+#define DMA_FRAME_SHIFT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? \
+ DMA_FRAME_SHIFT_CONTIG : \
+ DMA_FRAME_SHIFT_SG)
+#define DMA_FRAME_SIZE(x) (1 << DMA_FRAME_SHIFT(x))
+#define DMA_SYNC OMAP_DMA_SYNC_FRAME
+#define THRESHOLD_LEVEL DMA_FRAME_SIZE
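+/*
+ * With the 0x7f threshold mask above, FIFO_SIZE works out to 128 and
+ * FIFO_SHIFT to 7, so a CONTIG mode DMA frame is 64 elements, i.e. the
+ * FIFO threshold is set at half the FIFO depth. Each element is a 32-bit
+ * word (assuming OMAP_DMA_DATA_TYPE_S32 selects 4-byte elements), giving
+ * 256 bytes per DMA frame in CONTIG mode.
+ */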
+
+
+#define MAX_VIDEO_MEM 4 /* arbitrary video memory limit in MB */
+
+
+/*
+ * Structures
+ */
+
+/* buffer for one video frame */
+struct omap1_cam_buf {
+ struct videobuf_buffer vb;
+ enum v4l2_mbus_pixelcode code;
+ int inwork;
+ struct scatterlist *sgbuf;
+ int sgcount;
+ int bytes_left;
+ enum videobuf_state result;
+};
+
+struct omap1_cam_dev {
+ struct soc_camera_host soc_host;
+ struct soc_camera_device *icd;
+ struct clk *clk;
+
+ unsigned int irq;
+ void __iomem *base;
+
+ int dma_ch;
+
+ struct omap1_cam_platform_data *pdata;
+ struct resource *res;
+ unsigned long pflags;
+ unsigned long camexclk;
+
+ struct list_head capture;
+
+ /* lock used to protect videobuf */
+ spinlock_t lock;
+
+ /* Pointers to DMA buffers */
+ struct omap1_cam_buf *active;
+ struct omap1_cam_buf *ready;
+
+ enum omap1_cam_vb_mode vb_mode;
+ int (*mmap_mapper)(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma);
+
+ u32 reg_cache[0];
+};
+
+
+static void cam_write(struct omap1_cam_dev *pcdev, u16 reg, u32 val)
+{
+ pcdev->reg_cache[reg / sizeof(u32)] = val;
+ __raw_writel(val, pcdev->base + reg);
+}
+
+static u32 cam_read(struct omap1_cam_dev *pcdev, u16 reg, bool from_cache)
+{
+ return !from_cache ? __raw_readl(pcdev->base + reg) :
+ pcdev->reg_cache[reg / sizeof(u32)];
+}
+
+#define CAM_READ(pcdev, reg) \
+ cam_read(pcdev, REG_##reg, false)
+#define CAM_WRITE(pcdev, reg, val) \
+ cam_write(pcdev, REG_##reg, val)
+#define CAM_READ_CACHE(pcdev, reg) \
+ cam_read(pcdev, REG_##reg, true)
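+
+/*
+ * cam_write() mirrors every register write in reg_cache[] (the zero-length
+ * array at the end of struct omap1_cam_dev, presumably sized to cover the
+ * register block when the device structure is allocated), so
+ * CAM_READ_CACHE() returns the last written value without a hardware
+ * read-back.
+ */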
+
+/*
+ * Videobuf operations
+ */
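+/*
+ * Compute the per-buffer size and clamp the buffer count to the
+ * MAX_VIDEO_MEM limit: e.g. with the 4 MB cap a 640x480 frame at 16 bits
+ * per pixel (614400 bytes) allows at most 6 buffers.
+ */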
+static int omap1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+
+ *size = icd->sizeimage;
+
+ if (!*count || *count < OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode))
+ *count = OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode);
+
+ if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
+ *count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
+
+ dev_dbg(icd->parent,
+ "%s: count=%d, size=%d\n", __func__, *count, *size);
+
+ return 0;
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct omap1_cam_buf *buf,
+ enum omap1_cam_vb_mode vb_mode)
+{
+ struct videobuf_buffer *vb = &buf->vb;
+
+ BUG_ON(in_interrupt());
+
+ videobuf_waiton(vq, vb, 0, 0);
+
+ if (vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ videobuf_dma_contig_free(vq, vb);
+ } else {
+ struct soc_camera_device *icd = vq->priv_data;
+ struct device *dev = icd->parent;
+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+
+ videobuf_dma_unmap(dev, dma);
+ videobuf_dma_free(dma);
+ }
+
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static int omap1_videobuf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb, enum v4l2_field field)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ struct omap1_cam_buf *buf = container_of(vb, struct omap1_cam_buf, vb);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ int ret;
+
+ WARN_ON(!list_empty(&vb->queue));
+
+ BUG_ON(NULL == icd->current_fmt);
+
+ buf->inwork = 1;
+
+ if (buf->code != icd->current_fmt->code || vb->field != field ||
+ vb->width != icd->user_width ||
+ vb->height != icd->user_height) {
+ buf->code = icd->current_fmt->code;
+ vb->width = icd->user_width;
+ vb->height = icd->user_height;
+ vb->field = field;
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ }
+
+ vb->size = icd->sizeimage;
+
+ if (vb->baddr && vb->bsize < vb->size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret)
+ goto fail;
+
+ vb->state = VIDEOBUF_PREPARED;
+ }
+ buf->inwork = 0;
+
+ return 0;
+fail:
+ free_buffer(vq, buf, pcdev->vb_mode);
+out:
+ buf->inwork = 0;
+ return ret;
+}
+
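+/*
+ * Program the DMA destination and transfer size for the next block. The
+ * frame count passed to omap_set_dma_transfer_params() is the block size
+ * divided by (DMA frame size * element size), hence the combined shift.
+ * In SG mode a misaligned sg entry is trimmed to the nearest DMA frame
+ * boundary before being programmed.
+ */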
+static void set_dma_dest_params(int dma_ch, struct omap1_cam_buf *buf,
+ enum omap1_cam_vb_mode vb_mode)
+{
+ dma_addr_t dma_addr;
+ unsigned int block_size;
+
+ if (vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ dma_addr = videobuf_to_dma_contig(&buf->vb);
+ block_size = buf->vb.size;
+ } else {
+ if (WARN_ON(!buf->sgbuf)) {
+ buf->result = VIDEOBUF_ERROR;
+ return;
+ }
+ dma_addr = sg_dma_address(buf->sgbuf);
+ if (WARN_ON(!dma_addr)) {
+ buf->sgbuf = NULL;
+ buf->result = VIDEOBUF_ERROR;
+ return;
+ }
+ block_size = sg_dma_len(buf->sgbuf);
+ if (WARN_ON(!block_size)) {
+ buf->sgbuf = NULL;
+ buf->result = VIDEOBUF_ERROR;
+ return;
+ }
+ if (unlikely(buf->bytes_left < block_size))
+ block_size = buf->bytes_left;
+ if (WARN_ON(dma_addr & (DMA_FRAME_SIZE(vb_mode) *
+ DMA_ELEMENT_SIZE - 1))) {
+ dma_addr = ALIGN(dma_addr, DMA_FRAME_SIZE(vb_mode) *
+ DMA_ELEMENT_SIZE);
+ block_size &= ~(DMA_FRAME_SIZE(vb_mode) *
+ DMA_ELEMENT_SIZE - 1);
+ }
+ buf->bytes_left -= block_size;
+ buf->sgcount++;
+ }
+
+ omap_set_dma_dest_params(dma_ch,
+ OMAP_DMA_PORT_EMIFF, OMAP_DMA_AMODE_POST_INC, dma_addr, 0, 0);
+ omap_set_dma_transfer_params(dma_ch,
+ OMAP_DMA_DATA_TYPE_S32, DMA_FRAME_SIZE(vb_mode),
+ block_size >> (DMA_FRAME_SHIFT(vb_mode) + DMA_ELEMENT_SHIFT),
+ DMA_SYNC, 0, 0);
+}
+
+static struct omap1_cam_buf *prepare_next_vb(struct omap1_cam_dev *pcdev)
+{
+ struct omap1_cam_buf *buf;
+
+ /*
+ * If there is already a buffer pointed to by pcdev->ready,
+ * (re)use it, otherwise try to fetch and configure a new one.
+ */
+ buf = pcdev->ready;
+ if (!buf) {
+ if (list_empty(&pcdev->capture))
+ return buf;
+ buf = list_entry(pcdev->capture.next,
+ struct omap1_cam_buf, vb.queue);
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ pcdev->ready = buf;
+ list_del_init(&buf->vb.queue);
+ }
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, we can safely enter next buffer parameters
+ * into the DMA programming register set after the DMA
+ * has already been activated on the previous buffer
+ */
+ set_dma_dest_params(pcdev->dma_ch, buf, pcdev->vb_mode);
+ } else {
+ /*
+ * In SG mode, the above is not safe since there are probably
+ * a bunch of sgbufs from previous sglist still pending.
+ * Instead, mark the sglist fresh for the upcoming
+ * try_next_sgbuf().
+ */
+ buf->sgbuf = NULL;
+ }
+
+ return buf;
+}
+
+static struct scatterlist *try_next_sgbuf(int dma_ch, struct omap1_cam_buf *buf)
+{
+ struct scatterlist *sgbuf;
+
+ if (likely(buf->sgbuf)) {
+ /* current sglist is active */
+ if (unlikely(!buf->bytes_left)) {
+ /* indicate sglist complete */
+ sgbuf = NULL;
+ } else {
+ /* process next sgbuf */
+ sgbuf = sg_next(buf->sgbuf);
+ if (WARN_ON(!sgbuf)) {
+ buf->result = VIDEOBUF_ERROR;
+ } else if (WARN_ON(!sg_dma_len(sgbuf))) {
+ sgbuf = NULL;
+ buf->result = VIDEOBUF_ERROR;
+ }
+ }
+ buf->sgbuf = sgbuf;
+ } else {
+ /* sglist is fresh, initialize it before using */
+ struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
+
+ sgbuf = dma->sglist;
+ if (!(WARN_ON(!sgbuf))) {
+ buf->sgbuf = sgbuf;
+ buf->sgcount = 0;
+ buf->bytes_left = buf->vb.size;
+ buf->result = VIDEOBUF_DONE;
+ }
+ }
+ if (sgbuf)
+ /*
+ * Put our next sgbuf parameters (address, size)
+ * into the DMA programming register set.
+ */
+ set_dma_dest_params(dma_ch, buf, OMAP1_CAM_DMA_SG);
+
+ return sgbuf;
+}
+
+static void start_capture(struct omap1_cam_dev *pcdev)
+{
+ struct omap1_cam_buf *buf = pcdev->active;
+ u32 ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+ u32 mode = CAM_READ_CACHE(pcdev, MODE) & ~EN_V_DOWN;
+
+ if (WARN_ON(!buf))
+ return;
+
+ /*
+ * Enable start of frame interrupt, which we will use for activating
+ * our end of frame watchdog when capture actually starts.
+ */
+ mode |= EN_V_UP;
+
+ if (unlikely(ctrlclock & LCLK_EN))
+ /* stop pixel clock before FIFO reset */
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+ /* reset FIFO */
+ CAM_WRITE(pcdev, MODE, mode | RAZ_FIFO);
+
+ omap_start_dma(pcdev->dma_ch);
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
+ /*
+ * In SG mode, it's a good moment for fetching next sgbuf
+ * from the current sglist and, if available, already putting
+ * its parameters into the DMA programming register set.
+ */
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ }
+
+ /* (re)enable pixel clock */
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock | LCLK_EN);
+ /* release FIFO reset */
+ CAM_WRITE(pcdev, MODE, mode);
+}
+
+static void suspend_capture(struct omap1_cam_dev *pcdev)
+{
+ u32 ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+ omap_stop_dma(pcdev->dma_ch);
+}
+
+static void disable_capture(struct omap1_cam_dev *pcdev)
+{
+ u32 mode = CAM_READ_CACHE(pcdev, MODE);
+
+ CAM_WRITE(pcdev, MODE, mode & ~(IRQ_MASK | DMA));
+}
+
+static void omap1_videobuf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ struct omap1_cam_buf *buf;
+ u32 mode;
+
+ list_add_tail(&vb->queue, &pcdev->capture);
+ vb->state = VIDEOBUF_QUEUED;
+
+ if (pcdev->active) {
+ /*
+ * Capture in progress, so don't touch pcdev->ready even if
+ * empty. Since the transfer of the DMA programming register set
+ * content to the DMA working register set is done automatically
+ * by the DMA hardware, this can pretty well happen while we
+ * are keeping the lock here. Leave fetching it from the queue
+ * to be done when the next DMA interrupt occurs instead.
+ */
+ return;
+ }
+
+ WARN_ON(pcdev->ready);
+
+ buf = prepare_next_vb(pcdev);
+ if (WARN_ON(!buf))
+ return;
+
+ pcdev->active = buf;
+ pcdev->ready = NULL;
+
+ dev_dbg(icd->parent,
+ "%s: capture not active, setup FIFO, start DMA\n", __func__);
+ mode = CAM_READ_CACHE(pcdev, MODE) & ~THRESHOLD_MASK;
+ mode |= THRESHOLD_LEVEL(pcdev->vb_mode) << THRESHOLD_SHIFT;
+ CAM_WRITE(pcdev, MODE, mode | EN_FIFO_FULL | DMA);
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
+ /*
+ * In SG mode, the above prepare_next_vb() didn't actually
+ * put anything into the DMA programming register set,
+ * so we have to do it now, before activating DMA.
+ */
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ }
+
+ start_capture(pcdev);
+}
+
+static void omap1_videobuf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct omap1_cam_buf *buf =
+ container_of(vb, struct omap1_cam_buf, vb);
+ struct soc_camera_device *icd = vq->priv_data;
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct omap1_cam_dev *pcdev = ici->priv;
+
+ switch (vb->state) {
+ case VIDEOBUF_DONE:
+ dev_dbg(dev, "%s (done)\n", __func__);
+ break;
+ case VIDEOBUF_ACTIVE:
+ dev_dbg(dev, "%s (active)\n", __func__);
+ break;
+ case VIDEOBUF_QUEUED:
+ dev_dbg(dev, "%s (queued)\n", __func__);
+ break;
+ case VIDEOBUF_PREPARED:
+ dev_dbg(dev, "%s (prepared)\n", __func__);
+ break;
+ default:
+ dev_dbg(dev, "%s (unknown %d)\n", __func__, vb->state);
+ break;
+ }
+
+ free_buffer(vq, buf, pcdev->vb_mode);
+}
+
+static void videobuf_done(struct omap1_cam_dev *pcdev,
+ enum videobuf_state result)
+{
+ struct omap1_cam_buf *buf = pcdev->active;
+ struct videobuf_buffer *vb;
+ struct device *dev = pcdev->icd->parent;
+
+ if (WARN_ON(!buf)) {
+ suspend_capture(pcdev);
+ disable_capture(pcdev);
+ return;
+ }
+
+ if (result == VIDEOBUF_ERROR)
+ suspend_capture(pcdev);
+
+ vb = &buf->vb;
+ if (waitqueue_active(&vb->done)) {
+ if (!pcdev->ready && result != VIDEOBUF_ERROR) {
+ /*
+ * No next buffer has been entered into the DMA
+ * programming register set on time (could be done only
+ * while the previous DMA interrupt was processed, not
+ * later), so the last DMA block, be it a whole buffer
+ * if in CONTIG or its last sgbuf if in SG mode, is
+ * about to be reused by the just autoreinitialized DMA
+ * engine and overwritten with the next frame's data. The best
+ * we can do is stop the capture as soon as possible,
+ * hopefully before the next frame starts.
+ */
+ suspend_capture(pcdev);
+ }
+ vb->state = result;
+ do_gettimeofday(&vb->ts);
+ if (result != VIDEOBUF_ERROR)
+ vb->field_count++;
+ wake_up(&vb->done);
+
+ /* shift in next buffer */
+ buf = pcdev->ready;
+ pcdev->active = buf;
+ pcdev->ready = NULL;
+
+ if (!buf) {
+ /*
+ * No next buffer was ready on time (see above), so
+ * indicate error condition to force capture restart or
+ * stop, depending on next buffer already queued or not.
+ */
+ result = VIDEOBUF_ERROR;
+ prepare_next_vb(pcdev);
+
+ buf = pcdev->ready;
+ pcdev->active = buf;
+ pcdev->ready = NULL;
+ }
+ } else if (pcdev->ready) {
+ /*
+ * In both CONTIG and SG mode, the DMA engine has possibly
+ * been already autoreinitialized with the preprogrammed
+ * pcdev->ready buffer. We can either accept this fact
+ * and just swap the buffers, or provoke an error condition
+ * and restart capture. The former seems less intrusive.
+ */
+ dev_dbg(dev, "%s: nobody waiting on videobuf, swap with next\n",
+ __func__);
+ pcdev->active = pcdev->ready;
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
+ /*
+ * In SG mode, we have to make sure that the buffer we
+ * are putting back into the pcdev->ready is marked
+ * fresh.
+ */
+ buf->sgbuf = NULL;
+ }
+ pcdev->ready = buf;
+
+ buf = pcdev->active;
+ } else {
+ /*
+ * No next buffer has been entered into
+ * the DMA programming register set on time.
+ */
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, the DMA engine has already been
+ * reinitialized with the current buffer. Best we can do
+ * is not touching it.
+ */
+ dev_dbg(dev,
+ "%s: nobody waiting on videobuf, reuse it\n",
+ __func__);
+ } else {
+ /*
+ * In SG mode, the DMA engine has just been
+ * autoreinitialized with the last sgbuf from the
+ * current list. Restart capture in order to transfer
+ * next frame start into the first sgbuf, not the last
+ * one.
+ */
+ if (result != VIDEOBUF_ERROR) {
+ suspend_capture(pcdev);
+ result = VIDEOBUF_ERROR;
+ }
+ }
+ }
+
+ if (!buf) {
+ dev_dbg(dev, "%s: no more videobufs, stop capture\n", __func__);
+ disable_capture(pcdev);
+ return;
+ }
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, the current buffer parameters had already
+ * been entered into the DMA programming register set while the
+ * buffer was fetched with prepare_next_vb(), they may have also
+ * been transferred into the runtime set and already active if
+ * the DMA is still running.
+ */
+ } else {
+ /* In SG mode, extra steps are required */
+ if (result == VIDEOBUF_ERROR)
+ /* make sure we (re)use sglist from start on error */
+ buf->sgbuf = NULL;
+
+ /*
+ * In any case, enter the next sgbuf parameters into the DMA
+ * programming register set. They will be used either during
+ * nearest DMA autoreinitialization or, in case of an error,
+ * on DMA startup below.
+ */
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ }
+
+ if (result == VIDEOBUF_ERROR) {
+ dev_dbg(dev, "%s: videobuf error; reset FIFO, restart DMA\n",
+ __func__);
+ start_capture(pcdev);
+ /*
+ * In SG mode, the above also resulted in the next sgbuf
+ * parameters being entered into the DMA programming register
+ * set, making them ready for next DMA autoreinitialization.
+ */
+ }
+
+ /*
+ * Finally, try fetching next buffer.
+ * In CONTIG mode, it will also enter it into the DMA programming
+ * register set, making it ready for next DMA autoreinitialization.
+ */
+ prepare_next_vb(pcdev);
+}
+
+static void dma_isr(int channel, unsigned short status, void *data)
+{
+ struct omap1_cam_dev *pcdev = data;
+ struct omap1_cam_buf *buf = pcdev->active;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+
+ if (WARN_ON(!buf)) {
+ suspend_capture(pcdev);
+ disable_capture(pcdev);
+ goto out;
+ }
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, assume we have just managed to collect the
+ * whole frame, hopefully before our end of frame watchdog is
+ * triggered. Then, all we have to do is disable the watchdog
+ * for this frame and call videobuf_done(), indicating
+ * success.
+ */
+ CAM_WRITE(pcdev, MODE,
+ CAM_READ_CACHE(pcdev, MODE) & ~EN_V_DOWN);
+ videobuf_done(pcdev, VIDEOBUF_DONE);
+ } else {
+ /*
+ * In SG mode, we have to process every sgbuf from the current
+ * sglist, one after another.
+ */
+ if (buf->sgbuf) {
+ /*
+ * Current sglist not completed yet, try fetching next
+ * sgbuf, hopefully putting it into the DMA programming
+ * register set, making it ready for next DMA
+ * autoreinitialization.
+ */
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ if (buf->sgbuf)
+ goto out;
+
+ /*
+ * No more sgbufs left in the current sglist. This
+ * doesn't mean that the whole videobuffer is already
+ * complete, but only that the last sgbuf from the
+ * current sglist is about to be filled. It will be
+ * ready on next DMA interrupt, signalled with the
+ * buf->sgbuf set back to NULL.
+ */
+ if (buf->result != VIDEOBUF_ERROR) {
+ /*
+ * Video frame collected without errors so far,
+ * we can prepare for collecting a next one
+ * as soon as DMA gets autoreinitialized
+ * after the current (last) sgbuf is completed.
+ */
+ buf = prepare_next_vb(pcdev);
+ if (!buf)
+ goto out;
+
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ goto out;
+ }
+ }
+ /* end of videobuf */
+ videobuf_done(pcdev, buf->result);
+ }
+
+out:
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static irqreturn_t cam_isr(int irq, void *data)
+{
+ struct omap1_cam_dev *pcdev = data;
+ struct device *dev = pcdev->icd->parent;
+ struct omap1_cam_buf *buf = pcdev->active;
+ u32 it_status;
+ unsigned long flags;
+
+ it_status = CAM_READ(pcdev, IT_STATUS);
+ if (!it_status)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+
+ if (WARN_ON(!buf)) {
+ dev_warn(dev, "%s: unhandled camera interrupt, status == %#x\n",
+ __func__, it_status);
+ suspend_capture(pcdev);
+ disable_capture(pcdev);
+ goto out;
+ }
+
+ if (unlikely(it_status & FIFO_FULL)) {
+ dev_warn(dev, "%s: FIFO overflow\n", __func__);
+
+ } else if (it_status & V_DOWN) {
+ /* end of video frame watchdog */
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, the watchdog is disabled with
+ * successful DMA end of block interrupt, and reenabled
+ * on next frame start. If we get here, there is nothing
+ * to check, we must be out of sync.
+ */
+ } else {
+ if (buf->sgcount == 2) {
+ /*
+ * If exactly 2 sgbufs from the next sglist have
+ * been programmed into the DMA engine (the
+ * first one already transferred into the DMA
+ * runtime register set, the second one still
+ * in the programming set), then we are in sync.
+ */
+ goto out;
+ }
+ }
+ dev_notice(dev, "%s: unexpected end of video frame\n",
+ __func__);
+
+ } else if (it_status & V_UP) {
+ u32 mode;
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, we need this interrupt every frame
+ * in order to re-enable our end of frame watchdog.
+ */
+ mode = CAM_READ_CACHE(pcdev, MODE);
+ } else {
+ /*
+ * In SG mode, the end of frame watchdog enabled below
+ * is kept on permanently, so we can turn this one-shot
+ * setup off.
+ */
+ mode = CAM_READ_CACHE(pcdev, MODE) & ~EN_V_UP;
+ }
+
+ if (!(mode & EN_V_DOWN)) {
+ /* (re)enable end of frame watchdog interrupt */
+ mode |= EN_V_DOWN;
+ }
+ CAM_WRITE(pcdev, MODE, mode);
+ goto out;
+
+ } else {
+ dev_warn(dev, "%s: unhandled camera interrupt, status == %#x\n",
+ __func__, it_status);
+ goto out;
+ }
+
+ videobuf_done(pcdev, VIDEOBUF_ERROR);
+out:
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static struct videobuf_queue_ops omap1_videobuf_ops = {
+ .buf_setup = omap1_videobuf_setup,
+ .buf_prepare = omap1_videobuf_prepare,
+ .buf_queue = omap1_videobuf_queue,
+ .buf_release = omap1_videobuf_release,
+};
+
+
+/*
+ * SOC Camera host operations
+ */
+
+static void sensor_reset(struct omap1_cam_dev *pcdev, bool reset)
+{
+ /* apply/release camera sensor reset if requested by platform data */
+ if (pcdev->pflags & OMAP1_CAMERA_RST_HIGH)
+ CAM_WRITE(pcdev, GPIO, reset);
+ else if (pcdev->pflags & OMAP1_CAMERA_RST_LOW)
+ CAM_WRITE(pcdev, GPIO, !reset);
+}
+
+/*
+ * The following two functions absolutely depend on the fact that
+ * there can be only one camera on the OMAP1 camera sensor interface
+ */
+static int omap1_cam_add_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ u32 ctrlclock;
+
+ if (pcdev->icd)
+ return -EBUSY;
+
+ clk_enable(pcdev->clk);
+
+ /* setup sensor clock */
+ ctrlclock = CAM_READ(pcdev, CTRLCLOCK);
+ ctrlclock &= ~(CAMEXCLK_EN | MCLK_EN | DPLL_EN);
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+ ctrlclock &= ~FOSCMOD_MASK;
+ switch (pcdev->camexclk) {
+ case 6000000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_6MHz;
+ break;
+ case 8000000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_8MHz | DPLL_EN;
+ break;
+ case 9600000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_9_6MHz | DPLL_EN;
+ break;
+ case 12000000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_12MHz;
+ break;
+ case 24000000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_24MHz | DPLL_EN;
+ break;
+ default:
+ break;
+ }
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~DPLL_EN);
+
+ /* enable internal clock */
+ ctrlclock |= MCLK_EN;
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+ sensor_reset(pcdev, false);
+
+ pcdev->icd = icd;
+
+ dev_dbg(icd->parent, "OMAP1 Camera driver attached to camera %d\n",
+ icd->devnum);
+ return 0;
+}
+
+static void omap1_cam_remove_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ u32 ctrlclock;
+
+ BUG_ON(icd != pcdev->icd);
+
+ suspend_capture(pcdev);
+ disable_capture(pcdev);
+
+ sensor_reset(pcdev, true);
+
+ /* disable and release system clocks */
+ ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+ ctrlclock &= ~(MCLK_EN | DPLL_EN | CAMEXCLK_EN);
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+ ctrlclock = (ctrlclock & ~FOSCMOD_MASK) | FOSCMOD_12MHz;
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock | MCLK_EN);
+
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~MCLK_EN);
+
+ clk_disable(pcdev->clk);
+
+ pcdev->icd = NULL;
+
+ dev_dbg(icd->parent,
+ "OMAP1 Camera driver detached from camera %d\n", icd->devnum);
+}
+
+/* Duplicate standard formats based on host capability of byte swapping */
+static const struct soc_mbus_lookup omap1_cam_formats[] = {
+{
+ .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .name = "YVYU",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .name = "UYVY",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .name = "VYUY",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .name = "RGB555",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB555X,
+ .name = "RGB555X",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .name = "RGB565",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .name = "RGB565X",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+},
+};
+
+static int omap1_cam_get_formats(struct soc_camera_device *icd,
+ unsigned int idx, struct soc_camera_format_xlate *xlate)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->parent;
+ int formats = 0, ret;
+ enum v4l2_mbus_pixelcode code;
+ const struct soc_mbus_pixelfmt *fmt;
+
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_warn(dev, "%s: unsupported format code #%d: %d\n", __func__,
+ idx, code);
+ return 0;
+ }
+
+ /* Check support for the requested bits-per-sample */
+ if (fmt->bits_per_sample != 8)
+ return 0;
+
+ switch (code) {
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = soc_mbus_find_fmtdesc(code,
+ omap1_cam_formats,
+ ARRAY_SIZE(omap1_cam_formats));
+ xlate->code = code;
+ dev_dbg(dev,
+ "%s: providing format %s as byte swapped code #%d\n",
+ __func__, xlate->host_fmt->name, code);
+ xlate++;
+ }
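+ /* fall through - also provide the format in pass-through mode */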
+ default:
+ if (xlate)
+ dev_dbg(dev,
+ "%s: providing format %s in pass-through mode\n",
+ __func__, fmt->name);
+ }
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
+ }
+
+ return formats;
+}
+
+static bool is_dma_aligned(s32 bytes_per_line, unsigned int height,
+ enum omap1_cam_vb_mode vb_mode)
+{
+ int size = bytes_per_line * height;
+
+ return IS_ALIGNED(bytes_per_line, DMA_ELEMENT_SIZE) &&
+ IS_ALIGNED(size, DMA_FRAME_SIZE(vb_mode) * DMA_ELEMENT_SIZE);
+}
+
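+/*
+ * Align frame geometry to the DMA engine requirements.
+ * Returns a negative error code if the line length can't be computed,
+ * 0 if width/height had to be adjusted for DMA alignment, or 1 if the
+ * geometry was already DMA aligned.
+ */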
+static int dma_align(int *width, int *height,
+ const struct soc_mbus_pixelfmt *fmt,
+ enum omap1_cam_vb_mode vb_mode, bool enlarge)
+{
+ s32 bytes_per_line = soc_mbus_bytes_per_line(*width, fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
+
+ if (!is_dma_aligned(bytes_per_line, *height, vb_mode)) {
+ unsigned int pxalign = __fls(bytes_per_line / *width);
+ unsigned int salign = DMA_FRAME_SHIFT(vb_mode) +
+ DMA_ELEMENT_SHIFT - pxalign;
+ unsigned int incr = enlarge << salign;
+
+ v4l_bound_align_image(width, 1, *width + incr, 0,
+ height, 1, *height + incr, 0, salign);
+ return 0;
+ }
+ return 1;
+}
+
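+/*
+ * Call a subdev video operation with icd->sense temporarily pointing at a
+ * local soc_camera_sense structure, so that a pixel clock change requested
+ * by the sensor can be detected and checked against the platform limit.
+ */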
+#define subdev_call_with_sense(pcdev, dev, icd, sd, function, args...) \
+({ \
+ struct soc_camera_sense sense = { \
+ .master_clock = pcdev->camexclk, \
+ .pixel_clock_max = 0, \
+ }; \
+ int __ret; \
+ \
+ if (pcdev->pdata) \
+ sense.pixel_clock_max = pcdev->pdata->lclk_khz_max * 1000; \
+ icd->sense = &sense; \
+ __ret = v4l2_subdev_call(sd, video, function, ##args); \
+ icd->sense = NULL; \
+ \
+ if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { \
+ if (sense.pixel_clock > sense.pixel_clock_max) { \
+ dev_err(dev, \
+ "%s: pixel clock %lu set by the camera too high!\n", \
+ __func__, sense.pixel_clock); \
+ __ret = -EINVAL; \
+ } \
+ } \
+ __ret; \
+})
+
+static int set_mbus_format(struct omap1_cam_dev *pcdev, struct device *dev,
+ struct soc_camera_device *icd, struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf,
+ const struct soc_camera_format_xlate *xlate)
+{
+ s32 bytes_per_line;
+ int ret = subdev_call_with_sense(pcdev, dev, icd, sd, s_mbus_fmt, mf);
+
+ if (ret < 0) {
+ dev_err(dev, "%s: s_mbus_fmt failed\n", __func__);
+ return ret;
+ }
+
+ if (mf->code != xlate->code) {
+ dev_err(dev, "%s: unexpected pixel code change\n", __func__);
+ return -EINVAL;
+ }
+
+ bytes_per_line = soc_mbus_bytes_per_line(mf->width, xlate->host_fmt);
+ if (bytes_per_line < 0) {
+ dev_err(dev, "%s: soc_mbus_bytes_per_line() failed\n",
+ __func__);
+ return bytes_per_line;
+ }
+
+ if (!is_dma_aligned(bytes_per_line, mf->height, pcdev->vb_mode)) {
+ dev_err(dev, "%s: resulting geometry %ux%u not DMA aligned\n",
+ __func__, mf->width, mf->height);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int omap1_cam_set_crop(struct soc_camera_device *icd,
+ struct v4l2_crop *crop)
+{
+ struct v4l2_rect *rect = &crop->c;
+ const struct soc_camera_format_xlate *xlate = icd->current_fmt;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ ret = subdev_call_with_sense(pcdev, dev, icd, sd, s_crop, crop);
+ if (ret < 0) {
+ dev_warn(dev, "%s: failed to crop to %ux%u@%u:%u\n", __func__,
+ rect->width, rect->height, rect->left, rect->top);
+ return ret;
+ }
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0) {
+ dev_warn(dev, "%s: failed to fetch current format\n", __func__);
+ return ret;
+ }
+
+ ret = dma_align(&mf.width, &mf.height, xlate->host_fmt, pcdev->vb_mode,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to align %ux%u %s with DMA\n",
+ __func__, mf.width, mf.height,
+ xlate->host_fmt->name);
+ return ret;
+ }
+
+ if (!ret) {
+ /* sensor returned geometry not DMA aligned, trying to fix */
+ ret = set_mbus_format(pcdev, dev, icd, sd, &mf, xlate);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to set format\n", __func__);
+ return ret;
+ }
+ }
+
+ icd->user_width = mf.width;
+ icd->user_height = mf.height;
+
+ return 0;
+}
+
+static int omap1_cam_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(dev, "%s: format %#x not found\n", __func__,
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = dma_align(&mf.width, &mf.height, xlate->host_fmt, pcdev->vb_mode,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to align %ux%u %s with DMA\n",
+ __func__, pix->width, pix->height,
+ xlate->host_fmt->name);
+ return ret;
+ }
+
+ ret = set_mbus_format(pcdev, dev, icd, sd, &mf, xlate);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to set format\n", __func__);
+ return ret;
+ }
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+ icd->current_fmt = xlate;
+
+ return 0;
+}
+
+static int omap1_cam_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+ /* TODO: limit to omap1 hardware capabilities */
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(icd->parent, "Format %#x not found\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ /* limit to sensor capabilities */
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+
+ return 0;
+}
+
+static bool sg_mode;
+
+/*
+ * Local mmap_mapper wrapper,
+ * used for detecting videobuf-dma-contig buffer allocation failures
+ * and switching to videobuf-dma-sg automatically for future attempts.
+ */
+static int omap1_cam_mmap_mapper(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
+{
+ struct soc_camera_device *icd = q->priv_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ int ret;
+
+ ret = pcdev->mmap_mapper(q, buf, vma);
+
+ if (ret == -ENOMEM)
+ sg_mode = true;
+
+ return ret;
+}
+
+static void omap1_cam_init_videobuf(struct videobuf_queue *q,
+ struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+
+ if (!sg_mode)
+ videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops,
+ icd->parent, &pcdev->lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+ sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
+ else
+ videobuf_queue_sg_init(q, &omap1_videobuf_ops,
+ icd->parent, &pcdev->lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+ sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
+
+ /* use videobuf mode (auto)selected with the module parameter */
+ pcdev->vb_mode = sg_mode ? OMAP1_CAM_DMA_SG : OMAP1_CAM_DMA_CONTIG;
+
+ /*
+ * Ensure we substitute the videobuf-dma-contig version of the
+ * mmap_mapper() callback with our own wrapper, used for switching
+ * automatically to videobuf-dma-sg on buffer allocation failure.
+ */
+ if (!sg_mode && q->int_ops->mmap_mapper != omap1_cam_mmap_mapper) {
+ pcdev->mmap_mapper = q->int_ops->mmap_mapper;
+ q->int_ops->mmap_mapper = omap1_cam_mmap_mapper;
+ }
+}
+
+static int omap1_cam_reqbufs(struct soc_camera_device *icd,
+ struct v4l2_requestbuffers *p)
+{
+ int i;
+
+ /*
+ * This is for locking debugging only. I removed spinlocks and now I
+ * check whether .prepare is ever called on a linked buffer, or whether
+ * a dma IRQ can occur for an in-work or unlinked buffer. So far
+ * it has not triggered.
+ */
+ for (i = 0; i < p->count; i++) {
+ struct omap1_cam_buf *buf = container_of(icd->vb_vidq.bufs[i],
+ struct omap1_cam_buf, vb);
+ buf->inwork = 0;
+ INIT_LIST_HEAD(&buf->vb.queue);
+ }
+
+ return 0;
+}
+
+static int omap1_cam_querycap(struct soc_camera_host *ici,
+ struct v4l2_capability *cap)
+{
+ /* cap->name is set by the friendly caller:-> */
+ strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+static int omap1_cam_set_bus_param(struct soc_camera_device *icd)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ u32 pixfmt = icd->current_fmt->host_fmt->fourcc;
+ const struct soc_camera_format_xlate *xlate;
+ const struct soc_mbus_pixelfmt *fmt;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long common_flags;
+ u32 ctrlclock, mode;
+ int ret;
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg, SOCAM_BUS_FLAGS);
+ if (!common_flags) {
+ dev_warn(dev,
+ "Flags incompatible: camera 0x%x, host 0x%x\n",
+ cfg.flags, SOCAM_BUS_FLAGS);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = SOCAM_BUS_FLAGS;
+ }
+
+ /* Make choices, possibly based on platform configuration */
+ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+ (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+ if (!pcdev->pdata ||
+ pcdev->pdata->flags & OMAP1_CAMERA_LCLK_RISING)
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ else
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+ }
+
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(dev, "camera s_mbus_config(0x%lx) returned %d\n",
+ common_flags, ret);
+ return ret;
+ }
+
+ ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+ if (ctrlclock & LCLK_EN)
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+
+ if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) {
+ dev_dbg(dev, "CTRLCLOCK_REG |= POLCLK\n");
+ ctrlclock |= POLCLK;
+ } else {
+ dev_dbg(dev, "CTRLCLOCK_REG &= ~POLCLK\n");
+ ctrlclock &= ~POLCLK;
+ }
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+
+ if (ctrlclock & LCLK_EN)
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+ /* select bus endianess */
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ fmt = xlate->host_fmt;
+
+ mode = CAM_READ(pcdev, MODE) & ~(RAZ_FIFO | IRQ_MASK | DMA);
+ if (fmt->order == SOC_MBUS_ORDER_LE) {
+ dev_dbg(dev, "MODE_REG &= ~ORDERCAMD\n");
+ CAM_WRITE(pcdev, MODE, mode & ~ORDERCAMD);
+ } else {
+ dev_dbg(dev, "MODE_REG |= ORDERCAMD\n");
+ CAM_WRITE(pcdev, MODE, mode | ORDERCAMD);
+ }
+
+ return 0;
+}
+
+static unsigned int omap1_cam_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct omap1_cam_buf *buf;
+
+ buf = list_entry(icd->vb_vidq.stream.next, struct omap1_cam_buf,
+ vb.stream);
+
+ poll_wait(file, &buf->vb.done, pt);
+
+ if (buf->vb.state == VIDEOBUF_DONE ||
+ buf->vb.state == VIDEOBUF_ERROR)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static struct soc_camera_host_ops omap1_host_ops = {
+ .owner = THIS_MODULE,
+ .add = omap1_cam_add_device,
+ .remove = omap1_cam_remove_device,
+ .get_formats = omap1_cam_get_formats,
+ .set_crop = omap1_cam_set_crop,
+ .set_fmt = omap1_cam_set_fmt,
+ .try_fmt = omap1_cam_try_fmt,
+ .init_videobuf = omap1_cam_init_videobuf,
+ .reqbufs = omap1_cam_reqbufs,
+ .querycap = omap1_cam_querycap,
+ .set_bus_param = omap1_cam_set_bus_param,
+ .poll = omap1_cam_poll,
+};
+
+static int __init omap1_cam_probe(struct platform_device *pdev)
+{
+ struct omap1_cam_dev *pcdev;
+ struct resource *res;
+ struct clk *clk;
+ void __iomem *base;
+ unsigned int irq;
+ int err = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || (int)irq <= 0) {
+ err = -ENODEV;
+ goto exit;
+ }
+
+ clk = clk_get(&pdev->dev, "armper_ck");
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ goto exit;
+ }
+
+ pcdev = kzalloc(sizeof(*pcdev) + resource_size(res), GFP_KERNEL);
+ if (!pcdev) {
+ dev_err(&pdev->dev, "Could not allocate pcdev\n");
+ err = -ENOMEM;
+ goto exit_put_clk;
+ }
+
+ pcdev->res = res;
+ pcdev->clk = clk;
+
+ pcdev->pdata = pdev->dev.platform_data;
+ if (pcdev->pdata) {
+ pcdev->pflags = pcdev->pdata->flags;
+ pcdev->camexclk = pcdev->pdata->camexclk_khz * 1000;
+ }
+
+ switch (pcdev->camexclk) {
+ case 6000000:
+ case 8000000:
+ case 9600000:
+ case 12000000:
+ case 24000000:
+ break;
+ default:
+ /* pcdev->camexclk != 0 => pcdev->pdata != NULL */
+ dev_warn(&pdev->dev,
+ "Incorrect sensor clock frequency %ld kHz, "
+ "should be one of 0, 6, 8, 9.6, 12 or 24 MHz, "
+ "please correct your platform data\n",
+ pcdev->pdata->camexclk_khz);
+ pcdev->camexclk = 0;
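+ /* fall through */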
+ case 0:
+ dev_info(&pdev->dev, "Not providing sensor clock\n");
+ }
+
+ INIT_LIST_HEAD(&pcdev->capture);
+ spin_lock_init(&pcdev->lock);
+
+ /*
+ * Request the region.
+ */
+ if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) {
+ err = -EBUSY;
+ goto exit_kfree;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ err = -ENOMEM;
+ goto exit_release;
+ }
+ pcdev->irq = irq;
+ pcdev->base = base;
+
+ sensor_reset(pcdev, true);
+
+ err = omap_request_dma(OMAP_DMA_CAMERA_IF_RX, DRIVER_NAME,
+ dma_isr, (void *)pcdev, &pcdev->dma_ch);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Can't request DMA for OMAP1 Camera\n");
+ err = -EBUSY;
+ goto exit_iounmap;
+ }
+ dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_ch);
+
+ /* preconfigure DMA */
+ omap_set_dma_src_params(pcdev->dma_ch, OMAP_DMA_PORT_TIPB,
+ OMAP_DMA_AMODE_CONSTANT, res->start + REG_CAMDATA,
+ 0, 0);
+ omap_set_dma_dest_burst_mode(pcdev->dma_ch, OMAP_DMA_DATA_BURST_4);
+ /* setup DMA autoinitialization */
+ omap_dma_link_lch(pcdev->dma_ch, pcdev->dma_ch);
+
+ err = request_irq(pcdev->irq, cam_isr, 0, DRIVER_NAME, pcdev);
+ if (err) {
+ dev_err(&pdev->dev, "Camera interrupt register failed\n");
+ goto exit_free_dma;
+ }
+
+ pcdev->soc_host.drv_name = DRIVER_NAME;
+ pcdev->soc_host.ops = &omap1_host_ops;
+ pcdev->soc_host.priv = pcdev;
+ pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
+ pcdev->soc_host.nr = pdev->id;
+
+ err = soc_camera_host_register(&pcdev->soc_host);
+ if (err)
+ goto exit_free_irq;
+
+ dev_info(&pdev->dev, "OMAP1 Camera Interface driver loaded\n");
+
+ return 0;
+
+exit_free_irq:
+ free_irq(pcdev->irq, pcdev);
+exit_free_dma:
+ omap_free_dma(pcdev->dma_ch);
+exit_iounmap:
+ iounmap(base);
+exit_release:
+ release_mem_region(res->start, resource_size(res));
+exit_kfree:
+ kfree(pcdev);
+exit_put_clk:
+ clk_put(clk);
+exit:
+ return err;
+}
+
+static int __exit omap1_cam_remove(struct platform_device *pdev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct omap1_cam_dev *pcdev = container_of(soc_host,
+ struct omap1_cam_dev, soc_host);
+ struct resource *res;
+
+ free_irq(pcdev->irq, pcdev);
+
+ omap_free_dma(pcdev->dma_ch);
+
+ soc_camera_host_unregister(soc_host);
+
+ iounmap(pcdev->base);
+
+ res = pcdev->res;
+ release_mem_region(res->start, resource_size(res));
+
+ clk_put(pcdev->clk);
+
+ kfree(pcdev);
+
+ dev_info(&pdev->dev, "OMAP1 Camera Interface driver unloaded\n");
+
+ return 0;
+}
+
+static struct platform_driver omap1_cam_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = omap1_cam_probe,
+ .remove = __exit_p(omap1_cam_remove),
+};
+
+module_platform_driver(omap1_cam_driver);
+
+module_param(sg_mode, bool, 0644);
+MODULE_PARM_DESC(sg_mode, "videobuf mode, 0: dma-contig (default), 1: dma-sg");
+
+MODULE_DESCRIPTION("OMAP1 Camera Interface driver");
+MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
new file mode 100644
index 0000000..1e3776d
--- /dev/null
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -0,0 +1,1852 @@
+/*
+ * V4L2 Driver for PXA camera host
+ *
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf-dma-sg.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+
+#include <linux/videodev2.h>
+
+#include <mach/dma.h>
+#include <linux/platform_data/camera-pxa.h>
+
+#define PXA_CAM_VERSION "0.0.6"
+#define PXA_CAM_DRV_NAME "pxa27x-camera"
+
+/* Camera Interface */
+#define CICR0 0x0000
+#define CICR1 0x0004
+#define CICR2 0x0008
+#define CICR3 0x000C
+#define CICR4 0x0010
+#define CISR 0x0014
+#define CIFR 0x0018
+#define CITOR 0x001C
+#define CIBR0 0x0028
+#define CIBR1 0x0030
+#define CIBR2 0x0038
+
+#define CICR0_DMAEN (1 << 31) /* DMA request enable */
+#define CICR0_PAR_EN (1 << 30) /* Parity enable */
+#define CICR0_SL_CAP_EN (1 << 29) /* Capture enable for slave mode */
+#define CICR0_ENB (1 << 28) /* Camera interface enable */
+#define CICR0_DIS (1 << 27) /* Camera interface disable */
+#define CICR0_SIM (0x7 << 24) /* Sensor interface mode mask */
+#define CICR0_TOM (1 << 9) /* Time-out mask */
+#define CICR0_RDAVM (1 << 8) /* Receive-data-available mask */
+#define CICR0_FEM (1 << 7) /* FIFO-empty mask */
+#define CICR0_EOLM (1 << 6) /* End-of-line mask */
+#define CICR0_PERRM (1 << 5) /* Parity-error mask */
+#define CICR0_QDM (1 << 4) /* Quick-disable mask */
+#define CICR0_CDM (1 << 3) /* Disable-done mask */
+#define CICR0_SOFM (1 << 2) /* Start-of-frame mask */
+#define CICR0_EOFM (1 << 1) /* End-of-frame mask */
+#define CICR0_FOM (1 << 0) /* FIFO-overrun mask */
+
+#define CICR1_TBIT (1 << 31) /* Transparency bit */
+#define CICR1_RGBT_CONV (0x3 << 29) /* RGBT conversion mask */
+#define CICR1_PPL (0x7ff << 15) /* Pixels per line mask */
+#define CICR1_RGB_CONV (0x7 << 12) /* RGB conversion mask */
+#define CICR1_RGB_F (1 << 11) /* RGB format */
+#define CICR1_YCBCR_F (1 << 10) /* YCbCr format */
+#define CICR1_RGB_BPP (0x7 << 7) /* RGB bits per pixel mask */
+#define CICR1_RAW_BPP (0x3 << 5) /* Raw bits per pixel mask */
+#define CICR1_COLOR_SP (0x3 << 3) /* Color space mask */
+#define CICR1_DW (0x7 << 0) /* Data width mask */
+
+#define CICR2_BLW (0xff << 24) /* Beginning-of-line pixel clock
+ wait count mask */
+#define CICR2_ELW (0xff << 16) /* End-of-line pixel clock
+ wait count mask */
+#define CICR2_HSW (0x3f << 10) /* Horizontal sync pulse width mask */
+#define CICR2_BFPW (0x3f << 3) /* Beginning-of-frame pixel clock
+ wait count mask */
+#define CICR2_FSW (0x7 << 0) /* Frame stabilization
+ wait count mask */
+
+#define CICR3_BFW (0xff << 24) /* Beginning-of-frame line clock
+ wait count mask */
+#define CICR3_EFW (0xff << 16) /* End-of-frame line clock
+ wait count mask */
+#define CICR3_VSW (0x3f << 10) /* Vertical sync pulse width mask */
+#define CICR3_BFPW (0x3f << 3) /* Beginning-of-frame pixel clock
+ wait count mask */
+#define CICR3_LPF (0x7ff << 0) /* Lines per frame mask */
+
+#define CICR4_MCLK_DLY (0x3 << 24) /* MCLK Data Capture Delay mask */
+#define CICR4_PCLK_EN (1 << 23) /* Pixel clock enable */
+#define CICR4_PCP (1 << 22) /* Pixel clock polarity */
+#define CICR4_HSP (1 << 21) /* Horizontal sync polarity */
+#define CICR4_VSP (1 << 20) /* Vertical sync polarity */
+#define CICR4_MCLK_EN (1 << 19) /* MCLK enable */
+#define CICR4_FR_RATE (0x7 << 8) /* Frame rate mask */
+#define CICR4_DIV (0xff << 0) /* Clock divisor mask */
+
+#define CISR_FTO (1 << 15) /* FIFO time-out */
+#define CISR_RDAV_2 (1 << 14) /* Channel 2 receive data available */
+#define CISR_RDAV_1 (1 << 13) /* Channel 1 receive data available */
+#define CISR_RDAV_0 (1 << 12) /* Channel 0 receive data available */
+#define CISR_FEMPTY_2 (1 << 11) /* Channel 2 FIFO empty */
+#define CISR_FEMPTY_1 (1 << 10) /* Channel 1 FIFO empty */
+#define CISR_FEMPTY_0 (1 << 9) /* Channel 0 FIFO empty */
+#define CISR_EOL (1 << 8) /* End of line */
+#define CISR_PAR_ERR (1 << 7) /* Parity error */
+#define CISR_CQD (1 << 6) /* Camera interface quick disable */
+#define CISR_CDD (1 << 5) /* Camera interface disable done */
+#define CISR_SOF (1 << 4) /* Start of frame */
+#define CISR_EOF (1 << 3) /* End of frame */
+#define CISR_IFO_2 (1 << 2) /* FIFO overrun for Channel 2 */
+#define CISR_IFO_1 (1 << 1) /* FIFO overrun for Channel 1 */
+#define CISR_IFO_0 (1 << 0) /* FIFO overrun for Channel 0 */
+
+#define CIFR_FLVL2 (0x7f << 23) /* FIFO 2 level mask */
+#define CIFR_FLVL1 (0x7f << 16) /* FIFO 1 level mask */
+#define CIFR_FLVL0 (0xff << 8) /* FIFO 0 level mask */
+#define CIFR_THL_0 (0x3 << 4) /* Threshold Level for Channel 0 FIFO */
+#define CIFR_RESET_F (1 << 3) /* Reset input FIFOs */
+#define CIFR_FEN2 (1 << 2) /* FIFO enable for channel 2 */
+#define CIFR_FEN1 (1 << 1) /* FIFO enable for channel 1 */
+#define CIFR_FEN0 (1 << 0) /* FIFO enable for channel 0 */
+
+#define CICR0_SIM_MP (0 << 24)
+#define CICR0_SIM_SP (1 << 24)
+#define CICR0_SIM_MS (2 << 24)
+#define CICR0_SIM_EP (3 << 24)
+#define CICR0_SIM_ES (4 << 24)
+
+#define CICR1_DW_VAL(x) ((x) & CICR1_DW) /* Data bus width */
+#define CICR1_PPL_VAL(x) (((x) << 15) & CICR1_PPL) /* Pixels per line */
+#define CICR1_COLOR_SP_VAL(x) (((x) << 3) & CICR1_COLOR_SP) /* color space */
+#define CICR1_RGB_BPP_VAL(x) (((x) << 7) & CICR1_RGB_BPP) /* bpp for rgb */
+#define CICR1_RGBT_CONV_VAL(x) (((x) << 29) & CICR1_RGBT_CONV) /* rgbt conv */
+
+#define CICR2_BLW_VAL(x) (((x) << 24) & CICR2_BLW) /* Beginning-of-line pixel clock wait count */
+#define CICR2_ELW_VAL(x) (((x) << 16) & CICR2_ELW) /* End-of-line pixel clock wait count */
+#define CICR2_HSW_VAL(x) (((x) << 10) & CICR2_HSW) /* Horizontal sync pulse width */
+#define CICR2_BFPW_VAL(x) (((x) << 3) & CICR2_BFPW) /* Beginning-of-frame pixel clock wait count */
+#define CICR2_FSW_VAL(x) (((x) << 0) & CICR2_FSW) /* Frame stabilization wait count */
+
+#define CICR3_BFW_VAL(x) (((x) << 24) & CICR3_BFW) /* Beginning-of-frame line clock wait count */
+#define CICR3_EFW_VAL(x) (((x) << 16) & CICR3_EFW) /* End-of-frame line clock wait count */
+#define CICR3_VSW_VAL(x) (((x) << 11) & CICR3_VSW) /* Vertical sync pulse width */
+#define CICR3_LPF_VAL(x) (((x) << 0) & CICR3_LPF) /* Lines per frame */
+
+#define CICR0_IRQ_MASK (CICR0_TOM | CICR0_RDAVM | CICR0_FEM | CICR0_EOLM | \
+ CICR0_PERRM | CICR0_QDM | CICR0_CDM | CICR0_SOFM | \
+ CICR0_EOFM | CICR0_FOM)
+
+/*
+ * Structures
+ */
+enum pxa_camera_active_dma {
+ DMA_Y = 0x1,
+ DMA_U = 0x2,
+ DMA_V = 0x4,
+};
+
+/* descriptor needed for the PXA DMA engine */
+struct pxa_cam_dma {
+ dma_addr_t sg_dma;
+ struct pxa_dma_desc *sg_cpu;
+ size_t sg_size;
+ int sglen;
+};
+
+/* buffer for one video frame */
+struct pxa_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct videobuf_buffer vb;
+ enum v4l2_mbus_pixelcode code;
+ /* our descriptor lists for Y, U and V channels */
+ struct pxa_cam_dma dmas[3];
+ int inwork;
+ enum pxa_camera_active_dma active_dma;
+};
+
+struct pxa_camera_dev {
+ struct soc_camera_host soc_host;
+ /*
+ * PXA27x is only supposed to handle one camera on its Quick Capture
+ * interface. If anyone ever builds hardware to enable more than
+ * one camera, they will have to modify this driver too
+ */
+ struct soc_camera_device *icd;
+ struct clk *clk;
+
+ unsigned int irq;
+ void __iomem *base;
+
+ int channels;
+ unsigned int dma_chans[3];
+
+ struct pxacamera_platform_data *pdata;
+ struct resource *res;
+ unsigned long platform_flags;
+ unsigned long ciclk;
+ unsigned long mclk;
+ u32 mclk_divisor;
+ u16 width_flags; /* max 10 bits */
+
+ struct list_head capture;
+
+ spinlock_t lock;
+
+ struct pxa_buffer *active;
+ struct pxa_dma_desc *sg_tail[3];
+
+ u32 save_cicr[5];
+};
+
+struct pxa_cam {
+ unsigned long flags;
+};
+
+static const char *pxa_cam_driver_description = "PXA_Camera";
+
+static unsigned int vid_limit = 16; /* Video memory limit, in MB */
+
+/*
+ * Videobuf operations
+ */
+static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+
+ dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
+
+ *size = icd->sizeimage;
+
+ if (0 == *count)
+ *count = 32;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
+
+ return 0;
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
+ int i;
+
+ BUG_ON(in_interrupt());
+
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ &buf->vb, buf->vb.baddr, buf->vb.bsize);
+
+ /*
+ * This waits until this buffer is out of danger, i.e., until it is no
+ * longer in STATE_QUEUED or STATE_ACTIVE
+ */
+ videobuf_waiton(vq, &buf->vb, 0, 0);
+ videobuf_dma_unmap(vq->dev, dma);
+ videobuf_dma_free(dma);
+
+ for (i = 0; i < ARRAY_SIZE(buf->dmas); i++) {
+ if (buf->dmas[i].sg_cpu)
+ dma_free_coherent(ici->v4l2_dev.dev,
+ buf->dmas[i].sg_size,
+ buf->dmas[i].sg_cpu,
+ buf->dmas[i].sg_dma);
+ buf->dmas[i].sg_cpu = NULL;
+ }
+
+ buf->vb.state = VIDEOBUF_NEEDS_INIT;
+}
+
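+/*
+ * Compute how many scatterlist entries are needed to transfer 'size' bytes,
+ * starting at offset 'sg_first_ofs' within the first entry, with each chunk
+ * length rounded up to 8 bytes as the PXA27x DMA engine requires.
+ */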
+static int calculate_dma_sglen(struct scatterlist *sglist, int sglen,
+ int sg_first_ofs, int size)
+{
+ int i, offset, dma_len, xfer_len;
+ struct scatterlist *sg;
+
+ offset = sg_first_ofs;
+ for_each_sg(sglist, sg, sglen, i) {
+ dma_len = sg_dma_len(sg);
+
+ /* PXA27x Developer's Manual 27.4.4.1: round up to 8 bytes */
+ xfer_len = roundup(min(dma_len - offset, size), 8);
+
+ size = max(0, size - xfer_len);
+ offset = 0;
+ if (size == 0)
+ break;
+ }
+
+ BUG_ON(size != 0);
+ return i + 1;
+}
+
+/**
+ * pxa_init_dma_channel - init dma descriptors
+ * @pcdev: pxa camera device
+ * @buf: pxa buffer to find pxa dma channel
+ * @dma: dma video buffer
+ * @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V')
+ * @cibr: camera Receive Buffer Register
+ * @size: bytes to transfer
+ * @sg_first: first element of sg_list
+ * @sg_first_ofs: offset in first element of sg_list
+ *
+ * Prepares the pxa dma descriptors to transfer one camera channel.
+ * Beware sg_first and sg_first_ofs are both input and output parameters.
+ *
+ * Returns 0 or -ENOMEM if no coherent memory is available
+ */
+static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
+ struct pxa_buffer *buf,
+ struct videobuf_dmabuf *dma, int channel,
+ int cibr, int size,
+ struct scatterlist **sg_first, int *sg_first_ofs)
+{
+ struct pxa_cam_dma *pxa_dma = &buf->dmas[channel];
+ struct device *dev = pcdev->soc_host.v4l2_dev.dev;
+ struct scatterlist *sg;
+ int i, offset, sglen;
+ int dma_len = 0, xfer_len = 0;
+
+ if (pxa_dma->sg_cpu)
+ dma_free_coherent(dev, pxa_dma->sg_size,
+ pxa_dma->sg_cpu, pxa_dma->sg_dma);
+
+ sglen = calculate_dma_sglen(*sg_first, dma->sglen,
+ *sg_first_ofs, size);
+
+ pxa_dma->sg_size = (sglen + 1) * sizeof(struct pxa_dma_desc);
+ pxa_dma->sg_cpu = dma_alloc_coherent(dev, pxa_dma->sg_size,
+ &pxa_dma->sg_dma, GFP_KERNEL);
+ if (!pxa_dma->sg_cpu)
+ return -ENOMEM;
+
+ pxa_dma->sglen = sglen;
+ offset = *sg_first_ofs;
+
+ dev_dbg(dev, "DMA: sg_first=%p, sglen=%d, ofs=%d, dma.desc=%x\n",
+ *sg_first, sglen, *sg_first_ofs, pxa_dma->sg_dma);
+
+
+ for_each_sg(*sg_first, sg, sglen, i) {
+ dma_len = sg_dma_len(sg);
+
+ /* PXA27x Developer's Manual 27.4.4.1: round up to 8 bytes */
+ xfer_len = roundup(min(dma_len - offset, size), 8);
+
+ size = max(0, size - xfer_len);
+
+ pxa_dma->sg_cpu[i].dsadr = pcdev->res->start + cibr;
+ pxa_dma->sg_cpu[i].dtadr = sg_dma_address(sg) + offset;
+ pxa_dma->sg_cpu[i].dcmd =
+ DCMD_FLOWSRC | DCMD_BURST8 | DCMD_INCTRGADDR | xfer_len;
+#ifdef DEBUG
+ if (!i)
+ pxa_dma->sg_cpu[i].dcmd |= DCMD_STARTIRQEN;
+#endif
+ pxa_dma->sg_cpu[i].ddadr =
+ pxa_dma->sg_dma + (i + 1) * sizeof(struct pxa_dma_desc);
+
+ dev_vdbg(dev, "DMA: desc.%08x->@phys=0x%08x, len=%d\n",
+ pxa_dma->sg_dma + i * sizeof(struct pxa_dma_desc),
+ sg_dma_address(sg) + offset, xfer_len);
+ offset = 0;
+
+ if (size == 0)
+ break;
+ }
+
+ pxa_dma->sg_cpu[sglen].ddadr = DDADR_STOP;
+ pxa_dma->sg_cpu[sglen].dcmd = DCMD_FLOWSRC | DCMD_BURST8 | DCMD_ENDIRQEN;
+
+ /*
+ * Handle one special case:
+ * - with 3 planes (YUV422P format), we might finish with xfer_len equal
+ * to dma_len (ending on a PAGE boundary). In this case, the sg element
+ * for the next plane should be the one following the last element used
+ * to store this plane's last scatter-gather RAM page.
+ */
+ if (xfer_len >= dma_len) {
+ *sg_first_ofs = xfer_len - dma_len;
+ *sg_first = sg_next(sg);
+ } else {
+ *sg_first_ofs = xfer_len;
+ *sg_first = sg;
+ }
+
+ return 0;
+}
+
+static void pxa_videobuf_set_actdma(struct pxa_camera_dev *pcdev,
+ struct pxa_buffer *buf)
+{
+ buf->active_dma = DMA_Y;
+ if (pcdev->channels == 3)
+ buf->active_dma |= DMA_U | DMA_V;
+}
+
+/*
+ * Please check the DMA prepared buffer structure in:
+ * Documentation/video4linux/pxa_camera.txt
+ * Please also check pxa_camera_check_link_miss() to understand why modifying
+ * the DMA chain while it is running still works.
+ */
+static int pxa_videobuf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb, enum v4l2_field field)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct pxa_camera_dev *pcdev = ici->priv;
+ struct device *dev = pcdev->soc_host.v4l2_dev.dev;
+ struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
+ int ret;
+ int size_y, size_u = 0, size_v = 0;
+
+ dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ vb, vb->baddr, vb->bsize);
+
+ /* Added list head initialization on alloc */
+ WARN_ON(!list_empty(&vb->queue));
+
+#ifdef DEBUG
+ /*
+ * This can be useful if you want to see if we actually fill
+ * the buffer with something
+ */
+ memset((void *)vb->baddr, 0xaa, vb->bsize);
+#endif
+
+ BUG_ON(NULL == icd->current_fmt);
+
+ /*
+ * I think, in buf_prepare you only have to protect global data,
+ * the actual buffer is yours
+ */
+ buf->inwork = 1;
+
+ if (buf->code != icd->current_fmt->code ||
+ vb->width != icd->user_width ||
+ vb->height != icd->user_height ||
+ vb->field != field) {
+ buf->code = icd->current_fmt->code;
+ vb->width = icd->user_width;
+ vb->height = icd->user_height;
+ vb->field = field;
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ }
+
+ vb->size = icd->sizeimage;
+ if (0 != vb->baddr && vb->bsize < vb->size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ int size = vb->size;
+ int next_ofs = 0;
+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+ struct scatterlist *sg;
+
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret)
+ goto fail;
+
+ if (pcdev->channels == 3) {
+ size_y = size / 2;
+ size_u = size_v = size / 4;
+ } else {
+ size_y = size;
+ }
+
+ sg = dma->sglist;
+
+ /* init DMA for Y channel */
+ ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0, size_y,
+ &sg, &next_ofs);
+ if (ret) {
+ dev_err(dev, "DMA initialization for Y/RGB failed\n");
+ goto fail;
+ }
+
+ /* init DMA for U channel */
+ if (size_u)
+ ret = pxa_init_dma_channel(pcdev, buf, dma, 1, CIBR1,
+ size_u, &sg, &next_ofs);
+ if (ret) {
+ dev_err(dev, "DMA initialization for U failed\n");
+ goto fail_u;
+ }
+
+ /* init DMA for V channel */
+ if (size_v)
+ ret = pxa_init_dma_channel(pcdev, buf, dma, 2, CIBR2,
+ size_v, &sg, &next_ofs);
+ if (ret) {
+ dev_err(dev, "DMA initialization for V failed\n");
+ goto fail_v;
+ }
+
+ vb->state = VIDEOBUF_PREPARED;
+ }
+
+ buf->inwork = 0;
+ pxa_videobuf_set_actdma(pcdev, buf);
+
+ return 0;
+
+fail_v:
+ dma_free_coherent(dev, buf->dmas[1].sg_size,
+ buf->dmas[1].sg_cpu, buf->dmas[1].sg_dma);
+fail_u:
+ dma_free_coherent(dev, buf->dmas[0].sg_size,
+ buf->dmas[0].sg_cpu, buf->dmas[0].sg_dma);
+fail:
+ free_buffer(vq, buf);
+out:
+ buf->inwork = 0;
+ return ret;
+}
+
+/**
+ * pxa_dma_start_channels - start DMA channel for active buffer
+ * @pcdev: pxa camera device
+ *
+ * Initialize DMA channels to the beginning of the active video buffer, and
+ * start these channels.
+ */
+static void pxa_dma_start_channels(struct pxa_camera_dev *pcdev)
+{
+ int i;
+ struct pxa_buffer *active;
+
+ active = pcdev->active;
+
+ for (i = 0; i < pcdev->channels; i++) {
+ dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ "%s (channel=%d) ddadr=%08x\n", __func__,
+ i, active->dmas[i].sg_dma);
+ DDADR(pcdev->dma_chans[i]) = active->dmas[i].sg_dma;
+ DCSR(pcdev->dma_chans[i]) = DCSR_RUN;
+ }
+}
+
+static void pxa_dma_stop_channels(struct pxa_camera_dev *pcdev)
+{
+ int i;
+
+ for (i = 0; i < pcdev->channels; i++) {
+ dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ "%s (channel=%d)\n", __func__, i);
+ DCSR(pcdev->dma_chans[i]) = 0;
+ }
+}
+
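+/*
+ * Append the buffer's DMA descriptor chains to the currently programmed
+ * chains: terminate the new buffer with DDADR_STOP, link it after the
+ * previous tail descriptor (if any) and remember the new tail per channel.
+ */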
+static void pxa_dma_add_tail_buf(struct pxa_camera_dev *pcdev,
+ struct pxa_buffer *buf)
+{
+ int i;
+ struct pxa_dma_desc *buf_last_desc;
+
+ for (i = 0; i < pcdev->channels; i++) {
+ buf_last_desc = buf->dmas[i].sg_cpu + buf->dmas[i].sglen;
+ buf_last_desc->ddadr = DDADR_STOP;
+
+ if (pcdev->sg_tail[i])
+ /* Link the new buffer to the old tail */
+ pcdev->sg_tail[i]->ddadr = buf->dmas[i].sg_dma;
+
+ /* Update the channel tail */
+ pcdev->sg_tail[i] = buf_last_desc;
+ }
+}
+
+/**
+ * pxa_camera_start_capture - start video capturing
+ * @pcdev: camera device
+ *
+ * Launch capturing. DMA channels should not be active yet. They should get
+ * activated at the end of frame interrupt, to capture only whole frames, and
+ * never begin the capture of a partial frame.
+ */
+static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev)
+{
+ unsigned long cicr0;
+
+ dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__);
+ /* Enable End-Of-Frame Interrupt */
+ cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB;
+ cicr0 &= ~CICR0_EOFM;
+ __raw_writel(cicr0, pcdev->base + CICR0);
+}
+
+static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev)
+{
+ unsigned long cicr0;
+
+ pxa_dma_stop_channels(pcdev);
+
+ cicr0 = __raw_readl(pcdev->base + CICR0) & ~CICR0_ENB;
+ __raw_writel(cicr0, pcdev->base + CICR0);
+
+ pcdev->active = NULL;
+ dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__);
+}
+
+/* Called under spinlock_irqsave(&pcdev->lock, ...) */
+static void pxa_videobuf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct pxa_camera_dev *pcdev = ici->priv;
+ struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
+
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d active=%p\n",
+ __func__, vb, vb->baddr, vb->bsize, pcdev->active);
+
+ list_add_tail(&vb->queue, &pcdev->capture);
+
+ vb->state = VIDEOBUF_ACTIVE;
+ pxa_dma_add_tail_buf(pcdev, buf);
+
+ if (!pcdev->active)
+ pxa_camera_start_capture(pcdev);
+}
+
+static void pxa_videobuf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
+#ifdef DEBUG
+ struct soc_camera_device *icd = vq->priv_data;
+ struct device *dev = icd->parent;
+
+ dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ vb, vb->baddr, vb->bsize);
+
+ switch (vb->state) {
+ case VIDEOBUF_ACTIVE:
+ dev_dbg(dev, "%s (active)\n", __func__);
+ break;
+ case VIDEOBUF_QUEUED:
+ dev_dbg(dev, "%s (queued)\n", __func__);
+ break;
+ case VIDEOBUF_PREPARED:
+ dev_dbg(dev, "%s (prepared)\n", __func__);
+ break;
+ default:
+ dev_dbg(dev, "%s (unknown)\n", __func__);
+ break;
+ }
+#endif
+
+ free_buffer(vq, buf);
+}
+
+static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
+ struct videobuf_buffer *vb,
+ struct pxa_buffer *buf)
+{
+ int i;
+
+ /* _init is used to debug races, see comment in pxa_camera_reqbufs() */
+ list_del_init(&vb->queue);
+ vb->state = VIDEOBUF_DONE;
+ do_gettimeofday(&vb->ts);
+ vb->field_count++;
+ wake_up(&vb->done);
+ dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s dequeued buffer (vb=0x%p)\n",
+ __func__, vb);
+
+ if (list_empty(&pcdev->capture)) {
+ pxa_camera_stop_capture(pcdev);
+ for (i = 0; i < pcdev->channels; i++)
+ pcdev->sg_tail[i] = NULL;
+ return;
+ }
+
+ pcdev->active = list_entry(pcdev->capture.next,
+ struct pxa_buffer, vb.queue);
+}
+
+/**
+ * pxa_camera_check_link_miss - check missed DMA linking
+ * @pcdev: camera device
+ *
+ * The DMA chaining is done with DMA running. This means a tiny temporal window
+ * remains, where a buffer is queued on the chain, while the chain is already
+ * stopped. This means the tailed buffer would never be transferred by DMA.
+ * This function restarts the capture for this corner case, where:
+ * - DDADR() == DDADR_STOP
+ * - a videobuffer is queued on the pcdev->capture list
+ *
+ * Please check the "DMA hot chaining timeslice issue" in
+ * Documentation/video4linux/pxa_camera.txt
+ *
+ * Context: should only be called within the dma irq handler
+ */
+static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev)
+{
+ int i, is_dma_stopped = 1;
+
+ for (i = 0; i < pcdev->channels; i++)
+ if (DDADR(pcdev->dma_chans[i]) != DDADR_STOP)
+ is_dma_stopped = 0;
+ dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ "%s : top queued buffer=%p, dma_stopped=%d\n",
+ __func__, pcdev->active, is_dma_stopped);
+ if (pcdev->active && is_dma_stopped)
+ pxa_camera_start_capture(pcdev);
+}
+
+static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
+ enum pxa_camera_active_dma act_dma)
+{
+ struct device *dev = pcdev->soc_host.v4l2_dev.dev;
+ struct pxa_buffer *buf;
+ unsigned long flags;
+ u32 status, camera_status, overrun;
+ struct videobuf_buffer *vb;
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+
+ status = DCSR(channel);
+ DCSR(channel) = status;
+
+ camera_status = __raw_readl(pcdev->base + CISR);
+ overrun = CISR_IFO_0;
+ if (pcdev->channels == 3)
+ overrun |= CISR_IFO_1 | CISR_IFO_2;
+
+ if (status & DCSR_BUSERR) {
+ dev_err(dev, "DMA Bus Error IRQ!\n");
+ goto out;
+ }
+
+ if (!(status & (DCSR_ENDINTR | DCSR_STARTINTR))) {
+ dev_err(dev, "Unknown DMA IRQ source, status: 0x%08x\n",
+ status);
+ goto out;
+ }
+
+ /*
+ * pcdev->active should not be NULL in DMA irq handler.
+ *
+ * But there is one corner case: if capture was stopped due to an
+ * overrun on channel 1, and at that same time channel 2 was completed.
+ *
+ * When handling the overrun in DMA irq for channel 1, we'll stop the
+ * capture and restart it (and thus set pcdev->active to NULL). But the
+ * DMA irq handler will already be pending for channel 2. So on entering
+ * the DMA irq handler for channel 2 there will be no active buffer, yet
+ * that is normal.
+ */
+ if (!pcdev->active)
+ goto out;
+
+ vb = &pcdev->active->vb;
+ buf = container_of(vb, struct pxa_buffer, vb);
+ WARN_ON(buf->inwork || list_empty(&vb->queue));
+
+ dev_dbg(dev, "%s channel=%d %s%s(vb=0x%p) dma.desc=%x\n",
+ __func__, channel, status & DCSR_STARTINTR ? "SOF " : "",
+ status & DCSR_ENDINTR ? "EOF " : "", vb, DDADR(channel));
+
+ if (status & DCSR_ENDINTR) {
+ /*
+ * It's normal if the last frame creates an overrun, as there
+ * are no more DMA descriptors to fetch from QCI fifos
+ */
+ if (camera_status & overrun &&
+ !list_is_last(pcdev->capture.next, &pcdev->capture)) {
+ dev_dbg(dev, "FIFO overrun! CISR: %x\n",
+ camera_status);
+ pxa_camera_stop_capture(pcdev);
+ pxa_camera_start_capture(pcdev);
+ goto out;
+ }
+ buf->active_dma &= ~act_dma;
+ if (!buf->active_dma) {
+ pxa_camera_wakeup(pcdev, vb, buf);
+ pxa_camera_check_link_miss(pcdev);
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static void pxa_camera_dma_irq_y(int channel, void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
+ pxa_camera_dma_irq(channel, pcdev, DMA_Y);
+}
+
+static void pxa_camera_dma_irq_u(int channel, void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
+ pxa_camera_dma_irq(channel, pcdev, DMA_U);
+}
+
+static void pxa_camera_dma_irq_v(int channel, void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
+ pxa_camera_dma_irq(channel, pcdev, DMA_V);
+}
+
+static struct videobuf_queue_ops pxa_videobuf_ops = {
+ .buf_setup = pxa_videobuf_setup,
+ .buf_prepare = pxa_videobuf_prepare,
+ .buf_queue = pxa_videobuf_queue,
+ .buf_release = pxa_videobuf_release,
+};
+
+static void pxa_camera_init_videobuf(struct videobuf_queue *q,
+ struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct pxa_camera_dev *pcdev = ici->priv;
+
+ /*
+ * We must pass NULL as the dev pointer; then all pci_* DMA operations
+ * are transformed into normal dma_* ones.
+ */
+ videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+ sizeof(struct pxa_buffer), icd, &icd->video_lock);
+}
+
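+/*
+ * Derive the CICR4 DIV field from the requested MCLK and the CI clock rate:
+ * MCLK = CICLK / (2 * (DIV + 1)). The divisor is rounded up so that the
+ * resulting MCLK does not exceed the requested rate.
+ */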
+static u32 mclk_get_divisor(struct platform_device *pdev,
+ struct pxa_camera_dev *pcdev)
+{
+ unsigned long mclk = pcdev->mclk;
+ struct device *dev = &pdev->dev;
+ u32 div;
+ unsigned long lcdclk;
+
+ lcdclk = clk_get_rate(pcdev->clk);
+ pcdev->ciclk = lcdclk;
+
+ /* mclk <= ciclk / 4 (27.4.2) */
+ if (mclk > lcdclk / 4) {
+ mclk = lcdclk / 4;
+ dev_warn(dev, "Limiting master clock to %lu\n", mclk);
+ }
+
+ /* We verify mclk != 0, so if anyone breaks it, here comes their Oops */
+ div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
+
+ /* If we're not supplying MCLK, leave it at 0 */
+ if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+ pcdev->mclk = lcdclk / (2 * (div + 1));
+
+ dev_dbg(dev, "LCD clock %luHz, target freq %luHz, divisor %u\n",
+ lcdclk, mclk, div);
+
+ return div;
+}
+
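+/*
+ * Program the FIFO time-out register (CITOR) to slightly more than one
+ * pixel clock period, expressed in CI clock cycles.
+ */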
+static void recalculate_fifo_timeout(struct pxa_camera_dev *pcdev,
+ unsigned long pclk)
+{
+ /* We want a timeout > 1 pixel time, not ">=" */
+ u32 ciclk_per_pixel = pcdev->ciclk / pclk + 1;
+
+ __raw_writel(ciclk_per_pixel, pcdev->base + CITOR);
+}
+
+static void pxa_camera_activate(struct pxa_camera_dev *pcdev)
+{
+ u32 cicr4 = 0;
+
+ /* disable all interrupts */
+ __raw_writel(0x3ff, pcdev->base + CICR0);
+
+ if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
+ cicr4 |= CICR4_PCLK_EN;
+ if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+ cicr4 |= CICR4_MCLK_EN;
+ if (pcdev->platform_flags & PXA_CAMERA_PCP)
+ cicr4 |= CICR4_PCP;
+ if (pcdev->platform_flags & PXA_CAMERA_HSP)
+ cicr4 |= CICR4_HSP;
+ if (pcdev->platform_flags & PXA_CAMERA_VSP)
+ cicr4 |= CICR4_VSP;
+
+ __raw_writel(pcdev->mclk_divisor | cicr4, pcdev->base + CICR4);
+
+ if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+ /* Initialise the timeout under the assumption pclk = mclk */
+ recalculate_fifo_timeout(pcdev, pcdev->mclk);
+ else
+ /* "Safe default" - 13MHz */
+ recalculate_fifo_timeout(pcdev, 13000000);
+
+ clk_prepare_enable(pcdev->clk);
+}
+
+static void pxa_camera_deactivate(struct pxa_camera_dev *pcdev)
+{
+ clk_disable_unprepare(pcdev->clk);
+}
+
+static irqreturn_t pxa_camera_irq(int irq, void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
+ unsigned long status, cifr, cicr0;
+ struct pxa_buffer *buf;
+ struct videobuf_buffer *vb;
+
+ status = __raw_readl(pcdev->base + CISR);
+ dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ "Camera interrupt status 0x%lx\n", status);
+
+ if (!status)
+ return IRQ_NONE;
+
+ __raw_writel(status, pcdev->base + CISR);
+
+ if (status & CISR_EOF) {
+ /* Reset the FIFOs */
+ cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
+ __raw_writel(cifr, pcdev->base + CIFR);
+
+ pcdev->active = list_first_entry(&pcdev->capture,
+ struct pxa_buffer, vb.queue);
+ vb = &pcdev->active->vb;
+ buf = container_of(vb, struct pxa_buffer, vb);
+ pxa_videobuf_set_actdma(pcdev, buf);
+
+ pxa_dma_start_channels(pcdev);
+
+ cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_EOFM;
+ __raw_writel(cicr0, pcdev->base + CICR0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * The following two functions absolutely depend on the fact that
+ * there can be only one camera on the PXA quick capture interface.
+ * Called with .video_lock held.
+ */
+static int pxa_camera_add_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct pxa_camera_dev *pcdev = ici->priv;
+
+ if (pcdev->icd)
+ return -EBUSY;
+
+ pxa_camera_activate(pcdev);
+
+ pcdev->icd = icd;
+
+ dev_info(icd->parent, "PXA Camera driver attached to camera %d\n",
+ icd->devnum);
+
+ return 0;
+}
+
+/* Called with .video_lock held */
+static void pxa_camera_remove_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct pxa_camera_dev *pcdev = ici->priv;
+
+ BUG_ON(icd != pcdev->icd);
+
+ dev_info(icd->parent, "PXA Camera driver detached from camera %d\n",
+ icd->devnum);
+
+ /* disable capture, disable interrupts */
+ __raw_writel(0x3ff, pcdev->base + CICR0);
+
+ /* Stop DMA engine */
+ DCSR(pcdev->dma_chans[0]) = 0;
+ DCSR(pcdev->dma_chans[1]) = 0;
+ DCSR(pcdev->dma_chans[2]) = 0;
+
+ pxa_camera_deactivate(pcdev);
+
+ pcdev->icd = NULL;
+}
+
+static int test_platform_param(struct pxa_camera_dev *pcdev,
+ unsigned char buswidth, unsigned long *flags)
+{
+ /*
+ * Platform specified synchronization and pixel clock polarities are
+ * only a recommendation and are only used during probing. The PXA270
+ * quick capture interface supports both.
+ */
+ *flags = (pcdev->platform_flags & PXA_CAMERA_MASTER ?
+ V4L2_MBUS_MASTER : V4L2_MBUS_SLAVE) |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_LOW |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_LOW |
+ V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING;
+
+ /* If requested data width is supported by the platform, use it */
+ if ((1 << (buswidth - 1)) & pcdev->width_flags)
+ return 0;
+
+ return -EINVAL;
+}
+
+static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
+ unsigned long flags, __u32 pixfmt)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct pxa_camera_dev *pcdev = ici->priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ unsigned long dw, bpp;
+ u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0, y_skip_top;
+ int ret = v4l2_subdev_call(sd, sensor, g_skip_top_lines, &y_skip_top);
+
+ if (ret < 0)
+ y_skip_top = 0;
+
+ /*
+ * The data width is now guaranteed to be one of the three supported values.
+ * We set bits-per-pixel equal to the data width...
+ */
+ switch (icd->current_fmt->host_fmt->bits_per_sample) {
+ case 10:
+ dw = 4;
+ bpp = 0x40;
+ break;
+ case 9:
+ dw = 3;
+ bpp = 0x20;
+ break;
+ default:
+ /*
+ * Actually it can only be 8 now,
+ * default is just to silence compiler warnings
+ */
+ case 8:
+ dw = 2;
+ bpp = 0;
+ }
+
+ if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
+ cicr4 |= CICR4_PCLK_EN;
+ if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
+ cicr4 |= CICR4_MCLK_EN;
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cicr4 |= CICR4_PCP;
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ cicr4 |= CICR4_HSP;
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cicr4 |= CICR4_VSP;
+
+ cicr0 = __raw_readl(pcdev->base + CICR0);
+ if (cicr0 & CICR0_ENB)
+ __raw_writel(cicr0 & ~CICR0_ENB, pcdev->base + CICR0);
+
+ cicr1 = CICR1_PPL_VAL(icd->user_width - 1) | bpp | dw;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_YUV422P:
+ pcdev->channels = 3;
+ cicr1 |= CICR1_YCBCR_F;
+ /*
+ * Normally, the PXA bus wants UYVY as its input format. We allow all
+ * reorderings of the YUV422 format, as no processing is done,
+ * and the YUV stream is just passed through without any
+ * transformation. Note that UYVY is the only format that
+ * should be used if pxa framebuffer Overlay2 is used.
+ */
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ cicr1 |= CICR1_COLOR_SP_VAL(2);
+ break;
+ case V4L2_PIX_FMT_RGB555:
+ cicr1 |= CICR1_RGB_BPP_VAL(1) | CICR1_RGBT_CONV_VAL(2) |
+ CICR1_TBIT | CICR1_COLOR_SP_VAL(1);
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ cicr1 |= CICR1_COLOR_SP_VAL(1) | CICR1_RGB_BPP_VAL(2);
+ break;
+ }
+
+ cicr2 = 0;
+ cicr3 = CICR3_LPF_VAL(icd->user_height - 1) |
+ CICR3_BFW_VAL(min((u32)255, y_skip_top));
+ cicr4 |= pcdev->mclk_divisor;
+
+ __raw_writel(cicr1, pcdev->base + CICR1);
+ __raw_writel(cicr2, pcdev->base + CICR2);
+ __raw_writel(cicr3, pcdev->base + CICR3);
+ __raw_writel(cicr4, pcdev->base + CICR4);
+
+ /* CIF interrupts are not used, only DMA */
+ cicr0 = (cicr0 & CICR0_ENB) | (pcdev->platform_flags & PXA_CAMERA_MASTER ?
+ CICR0_SIM_MP : (CICR0_SL_CAP_EN | CICR0_SIM_SP));
+ cicr0 |= CICR0_DMAEN | CICR0_IRQ_MASK;
+ __raw_writel(cicr0, pcdev->base + CICR0);
+}
+
+static int pxa_camera_set_bus_param(struct soc_camera_device *icd)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct pxa_camera_dev *pcdev = ici->priv;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ u32 pixfmt = icd->current_fmt->host_fmt->fourcc;
+ unsigned long bus_flags, common_flags;
+ int ret;
+ struct pxa_cam *cam = icd->host_priv;
+
+ ret = test_platform_param(pcdev, icd->current_fmt->host_fmt->bits_per_sample,
+ &bus_flags);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ bus_flags);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%lx\n",
+ cfg.flags, bus_flags);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = bus_flags;
+ }
+
+ pcdev->channels = 1;
+
+ /* Make choices based on platform preferences */
+ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+ if (pcdev->platform_flags & PXA_CAMERA_HSP)
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+ if (pcdev->platform_flags & PXA_CAMERA_VSP)
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
+ (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
+ if (pcdev->platform_flags & PXA_CAMERA_PCP)
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
+ else
+ common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ }
+
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
+ common_flags, ret);
+ return ret;
+ }
+
+ cam->flags = common_flags;
+
+ pxa_camera_setup_cicr(icd, common_flags, pixfmt);
+
+ return 0;
+}
+
+static int pxa_camera_try_bus_param(struct soc_camera_device *icd,
+ unsigned char buswidth)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct pxa_camera_dev *pcdev = ici->priv;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long bus_flags, common_flags;
+ int ret = test_platform_param(pcdev, buswidth, &bus_flags);
+
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ bus_flags);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "Flags incompatible: camera 0x%x, host 0x%lx\n",
+ cfg.flags, bus_flags);
+ return -EINVAL;
+ }
+ } else if (ret == -ENOIOCTLCMD) {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static const struct soc_mbus_pixelfmt pxa_camera_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .name = "Planar YUV422 16 bit",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_U_V,
+ },
+};
+
+/* This will be corrected as we get more formats */
+static bool pxa_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+{
+ return fmt->packing == SOC_MBUS_PACKING_NONE ||
+ (fmt->bits_per_sample == 8 &&
+ fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+ (fmt->bits_per_sample > 8 &&
+ fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+}
+
+static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int idx,
+ struct soc_camera_format_xlate *xlate)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->parent;
+ int formats = 0, ret;
+ struct pxa_cam *cam;
+ enum v4l2_mbus_pixelcode code;
+ const struct soc_mbus_pixelfmt *fmt;
+
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_err(dev, "Invalid format code #%u: %d\n", idx, code);
+ return 0;
+ }
+
+ /* This also checks support for the requested bits-per-sample */
+ ret = pxa_camera_try_bus_param(icd, fmt->bits_per_sample);
+ if (ret < 0)
+ return 0;
+
+ if (!icd->host_priv) {
+ cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+ if (!cam)
+ return -ENOMEM;
+
+ icd->host_priv = cam;
+ } else {
+ cam = icd->host_priv;
+ }
+
+ switch (code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = &pxa_camera_formats[0];
+ xlate->code = code;
+ xlate++;
+ dev_dbg(dev, "Providing format %s using code %d\n",
+ pxa_camera_formats[0].name, code);
+ }
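+ /* fall through */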
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+ if (xlate)
+ dev_dbg(dev, "Providing format %s packed\n",
+ fmt->name);
+ break;
+ default:
+ if (!pxa_camera_packing_supported(fmt))
+ return 0;
+ if (xlate)
+ dev_dbg(dev,
+ "Providing format %s in pass-through mode\n",
+ fmt->name);
+ }
+
+ /* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
+ }
+
+ return formats;
+}
+
+static void pxa_camera_put_formats(struct soc_camera_device *icd)
+{
+ kfree(icd->host_priv);
+ icd->host_priv = NULL;
+}
+
+static int pxa_camera_check_frame(u32 width, u32 height)
+{
+ /* limit to pxa hardware capabilities */
+ return height < 32 || height > 2048 || width < 48 || width > 2048 ||
+ (width & 0x01);
+}
+
+static int pxa_camera_set_crop(struct soc_camera_device *icd,
+ struct v4l2_crop *a)
+{
+ struct v4l2_rect *rect = &a->c;
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct pxa_camera_dev *pcdev = ici->priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_sense sense = {
+ .master_clock = pcdev->mclk,
+ .pixel_clock_max = pcdev->ciclk / 4,
+ };
+ struct v4l2_mbus_framefmt mf;
+ struct pxa_cam *cam = icd->host_priv;
+ u32 fourcc = icd->current_fmt->host_fmt->fourcc;
+ int ret;
+
+ /* If PCLK is used to latch data from the sensor, check sense */
+ if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
+ icd->sense = &sense;
+
+ ret = v4l2_subdev_call(sd, video, s_crop, a);
+
+ icd->sense = NULL;
+
+ if (ret < 0) {
+ dev_warn(dev, "Failed to crop to %ux%u@%u:%u\n",
+ rect->width, rect->height, rect->left, rect->top);
+ return ret;
+ }
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ if (pxa_camera_check_frame(mf.width, mf.height)) {
+ /*
+ * Camera cropping produced a frame beyond our capabilities.
+		 * FIXME: just extract a subframe that we can process.
+ */
+ v4l_bound_align_image(&mf.width, 48, 2048, 1,
+ &mf.height, 32, 2048, 0,
+ fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0);
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ if (pxa_camera_check_frame(mf.width, mf.height)) {
+ dev_warn(icd->parent,
+ "Inconsistent state. Use S_FMT to repair\n");
+ return -EINVAL;
+ }
+ }
+
+ if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
+ if (sense.pixel_clock > sense.pixel_clock_max) {
+ dev_err(dev,
+ "pixel clock %lu set by the camera too high!",
+ sense.pixel_clock);
+ return -EIO;
+ }
+ recalculate_fifo_timeout(pcdev, sense.pixel_clock);
+ }
+
+ icd->user_width = mf.width;
+ icd->user_height = mf.height;
+
+ pxa_camera_setup_cicr(icd, cam->flags, fourcc);
+
+ return ret;
+}
+
+static int pxa_camera_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct pxa_camera_dev *pcdev = ici->priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate = NULL;
+ struct soc_camera_sense sense = {
+ .master_clock = pcdev->mclk,
+ .pixel_clock_max = pcdev->ciclk / 4,
+ };
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(dev, "Format %x not found\n", pix->pixelformat);
+ return -EINVAL;
+ }
+
+ /* If PCLK is used to latch data from the sensor, check sense */
+ if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
+ /* The caller holds a mutex. */
+ icd->sense = &sense;
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+
+ if (mf.code != xlate->code)
+ return -EINVAL;
+
+ icd->sense = NULL;
+
+ if (ret < 0) {
+ dev_warn(dev, "Failed to configure for format %x\n",
+ pix->pixelformat);
+ } else if (pxa_camera_check_frame(mf.width, mf.height)) {
+ dev_warn(dev,
+ "Camera driver produced an unsupported frame %dx%d\n",
+ mf.width, mf.height);
+ ret = -EINVAL;
+ } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
+ if (sense.pixel_clock > sense.pixel_clock_max) {
+ dev_err(dev,
+ "pixel clock %lu set by the camera too high!",
+ sense.pixel_clock);
+ return -EIO;
+ }
+ recalculate_fifo_timeout(pcdev, sense.pixel_clock);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+ icd->current_fmt = xlate;
+
+ return ret;
+}
+
+static int pxa_camera_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ __u32 pixfmt = pix->pixelformat;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (!xlate) {
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
+ return -EINVAL;
+ }
+
+ /*
+	 * Limit to pxa hardware capabilities. The YUV422P planar format
+	 * requires the image size to be a multiple of 16 bytes. If not, zeros
+	 * will be inserted between the Y and U planes, and the U and V planes,
+	 * which violates the YUV422P standard.
+ */
+ v4l_bound_align_image(&pix->width, 48, 2048, 1,
+ &pix->height, 32, 2048, 0,
+ pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0);
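+	/*
+	 * The alignment arguments above are log2 values: walign = 1 keeps the
+	 * width even and, for YUV422P, salign = 4 keeps width * height a
+	 * multiple of 16, matching the requirement described above (a reading
+	 * of v4l_bound_align_image(), not of the PXA manual).
+	 */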
+
+ /* limit to sensor capabilities */
+ mf.width = pix->width;
+ mf.height = pix->height;
+ /* Only progressive video supported so far */
+ mf.field = V4L2_FIELD_NONE;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->colorspace = mf.colorspace;
+
+ switch (mf.field) {
+ case V4L2_FIELD_ANY:
+ case V4L2_FIELD_NONE:
+ pix->field = V4L2_FIELD_NONE;
+ break;
+ default:
+ /* TODO: support interlaced at least in pass-through mode */
+ dev_err(icd->parent, "Field type %d unsupported.\n",
+ mf.field);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int pxa_camera_reqbufs(struct soc_camera_device *icd,
+ struct v4l2_requestbuffers *p)
+{
+ int i;
+
+ /*
+ * This is for locking debugging only. I removed spinlocks and now I
+ * check whether .prepare is ever called on a linked buffer, or whether
+	 * a DMA IRQ can occur for an in-work or unlinked buffer. So far it
+	 * hasn't triggered.
+ */
+ for (i = 0; i < p->count; i++) {
+ struct pxa_buffer *buf = container_of(icd->vb_vidq.bufs[i],
+ struct pxa_buffer, vb);
+ buf->inwork = 0;
+ INIT_LIST_HEAD(&buf->vb.queue);
+ }
+
+ return 0;
+}
+
+static unsigned int pxa_camera_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct pxa_buffer *buf;
+
+ buf = list_entry(icd->vb_vidq.stream.next, struct pxa_buffer,
+ vb.stream);
+
+ poll_wait(file, &buf->vb.done, pt);
+
+ if (buf->vb.state == VIDEOBUF_DONE ||
+ buf->vb.state == VIDEOBUF_ERROR)
+ return POLLIN|POLLRDNORM;
+
+ return 0;
+}
+
+static int pxa_camera_querycap(struct soc_camera_host *ici,
+ struct v4l2_capability *cap)
+{
+	/* cap->name is set by the friendly caller :-> */
+ strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+static int pxa_camera_suspend(struct device *dev)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct pxa_camera_dev *pcdev = ici->priv;
+ int i = 0, ret = 0;
+
+ pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR0);
+ pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR1);
+ pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR2);
+ pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR3);
+ pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR4);
+
+ if (pcdev->icd) {
+ struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->icd);
+ ret = v4l2_subdev_call(sd, core, s_power, 0);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int pxa_camera_resume(struct device *dev)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct pxa_camera_dev *pcdev = ici->priv;
+ int i = 0, ret = 0;
+
+ DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD;
+ DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD;
+ DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD;
+
+ __raw_writel(pcdev->save_cicr[i++] & ~CICR0_ENB, pcdev->base + CICR0);
+ __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR1);
+ __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR2);
+ __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR3);
+ __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR4);
+
+ if (pcdev->icd) {
+ struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->icd);
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ }
+
+ /* Restart frame capture if active buffer exists */
+ if (!ret && pcdev->active)
+ pxa_camera_start_capture(pcdev);
+
+ return ret;
+}
+
+static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
+ .owner = THIS_MODULE,
+ .add = pxa_camera_add_device,
+ .remove = pxa_camera_remove_device,
+ .set_crop = pxa_camera_set_crop,
+ .get_formats = pxa_camera_get_formats,
+ .put_formats = pxa_camera_put_formats,
+ .set_fmt = pxa_camera_set_fmt,
+ .try_fmt = pxa_camera_try_fmt,
+ .init_videobuf = pxa_camera_init_videobuf,
+ .reqbufs = pxa_camera_reqbufs,
+ .poll = pxa_camera_poll,
+ .querycap = pxa_camera_querycap,
+ .set_bus_param = pxa_camera_set_bus_param,
+};
+
+static int __devinit pxa_camera_probe(struct platform_device *pdev)
+{
+ struct pxa_camera_dev *pcdev;
+ struct resource *res;
+ void __iomem *base;
+ int irq;
+ int err = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || irq < 0) {
+ err = -ENODEV;
+ goto exit;
+ }
+
+ pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
+ if (!pcdev) {
+ dev_err(&pdev->dev, "Could not allocate pcdev\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ pcdev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pcdev->clk)) {
+ err = PTR_ERR(pcdev->clk);
+ goto exit_kfree;
+ }
+
+ pcdev->res = res;
+
+ pcdev->pdata = pdev->dev.platform_data;
+ pcdev->platform_flags = pcdev->pdata->flags;
+ if (!(pcdev->platform_flags & (PXA_CAMERA_DATAWIDTH_8 |
+ PXA_CAMERA_DATAWIDTH_9 | PXA_CAMERA_DATAWIDTH_10))) {
+ /*
+ * Platform hasn't set available data widths. This is bad.
+ * Warn and use a default.
+ */
+ dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
+ "data widths, using default 10 bit\n");
+ pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
+ }
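+	/*
+	 * width_flags records each supported bus width n as bit (n - 1);
+	 * this is presumably what test_platform_param() matches against the
+	 * client's bits-per-sample.
+	 */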
+ if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8)
+ pcdev->width_flags = 1 << 7;
+ if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_9)
+ pcdev->width_flags |= 1 << 8;
+ if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_10)
+ pcdev->width_flags |= 1 << 9;
+ pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
+ if (!pcdev->mclk) {
+ dev_warn(&pdev->dev,
+ "mclk == 0! Please, fix your platform data. "
+ "Using default 20MHz\n");
+ pcdev->mclk = 20000000;
+ }
+
+ pcdev->mclk_divisor = mclk_get_divisor(pdev, pcdev);
+
+ INIT_LIST_HEAD(&pcdev->capture);
+ spin_lock_init(&pcdev->lock);
+
+ /*
+ * Request the regions.
+ */
+ if (!request_mem_region(res->start, resource_size(res),
+ PXA_CAM_DRV_NAME)) {
+ err = -EBUSY;
+ goto exit_clk;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ err = -ENOMEM;
+ goto exit_release;
+ }
+ pcdev->irq = irq;
+ pcdev->base = base;
+
+ /* request dma */
+ err = pxa_request_dma("CI_Y", DMA_PRIO_HIGH,
+ pxa_camera_dma_irq_y, pcdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Can't request DMA for Y\n");
+ goto exit_iounmap;
+ }
+ pcdev->dma_chans[0] = err;
+ dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chans[0]);
+
+ err = pxa_request_dma("CI_U", DMA_PRIO_HIGH,
+ pxa_camera_dma_irq_u, pcdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Can't request DMA for U\n");
+ goto exit_free_dma_y;
+ }
+ pcdev->dma_chans[1] = err;
+ dev_dbg(&pdev->dev, "got DMA channel (U) %d\n", pcdev->dma_chans[1]);
+
+ err = pxa_request_dma("CI_V", DMA_PRIO_HIGH,
+ pxa_camera_dma_irq_v, pcdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Can't request DMA for V\n");
+ goto exit_free_dma_u;
+ }
+ pcdev->dma_chans[2] = err;
+ dev_dbg(&pdev->dev, "got DMA channel (V) %d\n", pcdev->dma_chans[2]);
+
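+	/*
+	 * DRCMR 68-70 are understood to be the DMA request-to-channel mapping
+	 * registers for the QCI receive FIFOs 0/1/2 (Y, U and V data);
+	 * DRCMR_MAPVLD marks each mapping as valid.
+	 */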
+ DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD;
+ DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD;
+ DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD;
+
+ /* request irq */
+ err = request_irq(pcdev->irq, pxa_camera_irq, 0, PXA_CAM_DRV_NAME,
+ pcdev);
+ if (err) {
+		dev_err(&pdev->dev, "Camera interrupt registration failed\n");
+ goto exit_free_dma;
+ }
+
+ pcdev->soc_host.drv_name = PXA_CAM_DRV_NAME;
+ pcdev->soc_host.ops = &pxa_soc_camera_host_ops;
+ pcdev->soc_host.priv = pcdev;
+ pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
+ pcdev->soc_host.nr = pdev->id;
+
+ err = soc_camera_host_register(&pcdev->soc_host);
+ if (err)
+ goto exit_free_irq;
+
+ return 0;
+
+exit_free_irq:
+ free_irq(pcdev->irq, pcdev);
+exit_free_dma:
+ pxa_free_dma(pcdev->dma_chans[2]);
+exit_free_dma_u:
+ pxa_free_dma(pcdev->dma_chans[1]);
+exit_free_dma_y:
+ pxa_free_dma(pcdev->dma_chans[0]);
+exit_iounmap:
+ iounmap(base);
+exit_release:
+ release_mem_region(res->start, resource_size(res));
+exit_clk:
+ clk_put(pcdev->clk);
+exit_kfree:
+ kfree(pcdev);
+exit:
+ return err;
+}
+
+static int __devexit pxa_camera_remove(struct platform_device *pdev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct pxa_camera_dev *pcdev = container_of(soc_host,
+ struct pxa_camera_dev, soc_host);
+ struct resource *res;
+
+ clk_put(pcdev->clk);
+
+ pxa_free_dma(pcdev->dma_chans[0]);
+ pxa_free_dma(pcdev->dma_chans[1]);
+ pxa_free_dma(pcdev->dma_chans[2]);
+ free_irq(pcdev->irq, pcdev);
+
+ soc_camera_host_unregister(soc_host);
+
+ iounmap(pcdev->base);
+
+ res = pcdev->res;
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(pcdev);
+
+ dev_info(&pdev->dev, "PXA Camera driver unloaded\n");
+
+ return 0;
+}
+
+static struct dev_pm_ops pxa_camera_pm = {
+ .suspend = pxa_camera_suspend,
+ .resume = pxa_camera_resume,
+};
+
+static struct platform_driver pxa_camera_driver = {
+ .driver = {
+ .name = PXA_CAM_DRV_NAME,
+ .pm = &pxa_camera_pm,
+ },
+ .probe = pxa_camera_probe,
+ .remove = __devexit_p(pxa_camera_remove),
+};
+
+module_platform_driver(pxa_camera_driver);
+
+MODULE_DESCRIPTION("PXA27x SoC Camera Host driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PXA_CAM_VERSION);
+MODULE_ALIAS("platform:" PXA_CAM_DRV_NAME);
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
new file mode 100644
index 0000000..0a24253
--- /dev/null
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -0,0 +1,2331 @@
+/*
+ * V4L2 Driver for SuperH Mobile CEU interface
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on V4L2 Driver for PXA camera host - "pxa_camera.c",
+ *
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/soc_camera.h>
+#include <media/sh_mobile_ceu.h>
+#include <media/sh_mobile_csi2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-mediabus.h>
+#include <media/soc_mediabus.h>
+
+/* register offsets for sh7722 / sh7723 */
+
+#define CAPSR 0x00 /* Capture start register */
+#define CAPCR 0x04 /* Capture control register */
+#define CAMCR 0x08 /* Capture interface control register */
+#define CMCYR 0x0c /* Capture interface cycle register */
+#define CAMOR 0x10 /* Capture interface offset register */
+#define CAPWR 0x14 /* Capture interface width register */
+#define CAIFR 0x18 /* Capture interface input format register */
+#define CSTCR 0x20 /* Camera strobe control register (<= sh7722) */
+#define CSECR 0x24 /* Camera strobe emission count register (<= sh7722) */
+#define CRCNTR 0x28 /* CEU register control register */
+#define CRCMPR 0x2c /* CEU register forcible control register */
+#define CFLCR 0x30 /* Capture filter control register */
+#define CFSZR 0x34 /* Capture filter size clip register */
+#define CDWDR 0x38 /* Capture destination width register */
+#define CDAYR 0x3c /* Capture data address Y register */
+#define CDACR 0x40 /* Capture data address C register */
+#define CDBYR 0x44 /* Capture data bottom-field address Y register */
+#define CDBCR 0x48 /* Capture data bottom-field address C register */
+#define CBDSR 0x4c /* Capture bundle destination size register */
+#define CFWCR 0x5c /* Firewall operation control register */
+#define CLFCR 0x60 /* Capture low-pass filter control register */
+#define CDOCR 0x64 /* Capture data output control register */
+#define CDDCR 0x68 /* Capture data complexity level register */
+#define CDDAR 0x6c /* Capture data complexity level address register */
+#define CEIER 0x70 /* Capture event interrupt enable register */
+#define CETCR 0x74 /* Capture event flag clear register */
+#define CSTSR 0x7c /* Capture status register */
+#define CSRTR 0x80 /* Capture software reset register */
+#define CDSSR 0x84 /* Capture data size register */
+#define CDAYR2 0x90 /* Capture data address Y register 2 */
+#define CDACR2 0x94 /* Capture data address C register 2 */
+#define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */
+#define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */
+
+#undef DEBUG_GEOMETRY
+#ifdef DEBUG_GEOMETRY
+#define dev_geo dev_info
+#else
+#define dev_geo dev_dbg
+#endif
+
+/* per video frame buffer */
+struct sh_mobile_ceu_buffer {
+ struct vb2_buffer vb; /* v4l buffer must be first */
+ struct list_head queue;
+};
+
+struct sh_mobile_ceu_dev {
+ struct soc_camera_host ici;
+ struct soc_camera_device *icd;
+ struct platform_device *csi2_pdev;
+
+ unsigned int irq;
+ void __iomem *base;
+ size_t video_limit;
+ size_t buf_total;
+
+ spinlock_t lock; /* Protects video buffer lists */
+ struct list_head capture;
+ struct vb2_buffer *active;
+ struct vb2_alloc_ctx *alloc_ctx;
+
+ struct sh_mobile_ceu_info *pdata;
+ struct completion complete;
+
+ u32 cflcr;
+
+ /* static max sizes either from platform data or default */
+ int max_width;
+ int max_height;
+
+ enum v4l2_field field;
+ int sequence;
+
+ unsigned int image_mode:1;
+ unsigned int is_16bit:1;
+ unsigned int frozen:1;
+};
+
+struct sh_mobile_ceu_cam {
+ /* CEU offsets within the camera output, before the CEU scaler */
+ unsigned int ceu_left;
+ unsigned int ceu_top;
+ /* Client output, as seen by the CEU */
+ unsigned int width;
+ unsigned int height;
+ /*
+ * User window from S_CROP / G_CROP, produced by client cropping and
+ * scaling, CEU scaling and CEU cropping, mapped back onto the client
+ * input window
+ */
+ struct v4l2_rect subrect;
+ /* Camera cropping rectangle */
+ struct v4l2_rect rect;
+ const struct soc_mbus_pixelfmt *extra_fmt;
+ enum v4l2_mbus_pixelcode code;
+};
+
+static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb)
+{
+ return container_of(vb, struct sh_mobile_ceu_buffer, vb);
+}
+
+static void ceu_write(struct sh_mobile_ceu_dev *priv,
+ unsigned long reg_offs, u32 data)
+{
+ iowrite32(data, priv->base + reg_offs);
+}
+
+static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs)
+{
+ return ioread32(priv->base + reg_offs);
+}
+
+static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
+{
+ int i, success = 0;
+ struct soc_camera_device *icd = pcdev->icd;
+
+ ceu_write(pcdev, CAPSR, 1 << 16); /* reset */
+
+ /* wait CSTSR.CPTON bit */
+ for (i = 0; i < 1000; i++) {
+ if (!(ceu_read(pcdev, CSTSR) & 1)) {
+ success++;
+ break;
+ }
+ udelay(1);
+ }
+
+ /* wait CAPSR.CPKIL bit */
+ for (i = 0; i < 1000; i++) {
+ if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) {
+ success++;
+ break;
+ }
+ udelay(1);
+ }
+
+
+ if (2 != success) {
+		dev_warn(icd->pdev, "soft reset timed out\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Videobuf operations
+ */
+
+/*
+ * .queue_setup() is called to check whether the driver can accept the
+ * requested number of buffers and to fill in plane sizes
+ * for the current frame format if required
+ */
+static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *count, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+ if (fmt) {
+ const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd,
+ fmt->fmt.pix.pixelformat);
+ unsigned int bytes_per_line;
+ int ret;
+
+ if (!xlate)
+ return -EINVAL;
+
+ ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
+ xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+
+ bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret);
+
+ ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line,
+ fmt->fmt.pix.height);
+ if (ret < 0)
+ return ret;
+
+ sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret);
+ } else {
+ /* Called from VIDIOC_REQBUFS or in compatibility mode */
+ sizes[0] = icd->sizeimage;
+ }
+
+ alloc_ctxs[0] = pcdev->alloc_ctx;
+
+ if (!vq->num_buffers)
+ pcdev->sequence = 0;
+
+ if (!*count)
+ *count = 2;
+
+ /* If *num_planes != 0, we have already verified *count. */
+ if (pcdev->video_limit && !*num_planes) {
+ size_t size = PAGE_ALIGN(sizes[0]) * *count;
+
+ if (size + pcdev->buf_total > pcdev->video_limit)
+ *count = (pcdev->video_limit - pcdev->buf_total) /
+ PAGE_ALIGN(sizes[0]);
+ }
+
+ *num_planes = 1;
+
+ dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]);
+
+ return 0;
+}
+
+#define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */
+#define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */
+#define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */
+#define CEU_CEIER_VBP (1 << 20) /* vbp error */
+#define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */
+#define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP)
+
+
+/*
+ * return value doesn't reflect the success/failure to queue the new buffer,
+ * but rather the status of the previous buffer.
+ */
+static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
+{
+ struct soc_camera_device *icd = pcdev->icd;
+ dma_addr_t phys_addr_top, phys_addr_bottom;
+ unsigned long top1, top2;
+ unsigned long bottom1, bottom2;
+ u32 status;
+ bool planar;
+ int ret = 0;
+
+ /*
+ * The hardware is _very_ picky about this sequence. Especially
+ * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
+ * several not-so-well documented interrupt sources in CETCR.
+ */
+ ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK);
+ status = ceu_read(pcdev, CETCR);
+ ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC);
+ if (!pcdev->frozen)
+ ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK);
+ ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP);
+ ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW);
+
+ /*
+ * When a VBP interrupt occurs, a capture end interrupt does not occur
+ * and the image of that frame is not captured correctly. So, soft reset
+ * is needed here.
+ */
+ if (status & CEU_CEIER_VBP) {
+ sh_mobile_ceu_soft_reset(pcdev);
+ ret = -EIO;
+ }
+
+ if (pcdev->frozen) {
+ complete(&pcdev->complete);
+ return ret;
+ }
+
+ if (!pcdev->active)
+ return ret;
+
+ if (V4L2_FIELD_INTERLACED_BT == pcdev->field) {
+ top1 = CDBYR;
+ top2 = CDBCR;
+ bottom1 = CDAYR;
+ bottom2 = CDACR;
+ } else {
+ top1 = CDAYR;
+ top2 = CDACR;
+ bottom1 = CDBYR;
+ bottom2 = CDBCR;
+ }
+
+ phys_addr_top = vb2_dma_contig_plane_dma_addr(pcdev->active, 0);
+
+ switch (icd->current_fmt->host_fmt->fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ planar = true;
+ break;
+ default:
+ planar = false;
+ }
+
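+	/*
+	 * Program the capture addresses: for interlaced fields the bottom-field
+	 * address sits one line below the top-field one, so the two fields end
+	 * up line-interleaved in the same buffer; for planar formats the C
+	 * plane follows the Y plane at bytesperline * height.
+	 */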
+ ceu_write(pcdev, top1, phys_addr_top);
+ if (V4L2_FIELD_NONE != pcdev->field) {
+ phys_addr_bottom = phys_addr_top + icd->bytesperline;
+ ceu_write(pcdev, bottom1, phys_addr_bottom);
+ }
+
+ if (planar) {
+ phys_addr_top += icd->bytesperline * icd->user_height;
+ ceu_write(pcdev, top2, phys_addr_top);
+ if (V4L2_FIELD_NONE != pcdev->field) {
+ phys_addr_bottom = phys_addr_top + icd->bytesperline;
+ ceu_write(pcdev, bottom2, phys_addr_bottom);
+ }
+ }
+
+ ceu_write(pcdev, CAPSR, 0x1); /* start capture */
+
+ return ret;
+}
+
+static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
+{
+ struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+
+ /* Added list head initialization on alloc */
+ WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb);
+
+ return 0;
+}
+
+static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+ unsigned long size;
+
+ size = icd->sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
+ vb->v4l2_buf.index, vb2_plane_size(vb, 0), size);
+ goto error;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+ vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
+
+#ifdef DEBUG
+ /*
+ * This can be useful if you want to see if we actually fill
+ * the buffer with something
+ */
+ if (vb2_plane_vaddr(vb, 0))
+ memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
+#endif
+
+ spin_lock_irq(&pcdev->lock);
+ list_add_tail(&buf->queue, &pcdev->capture);
+
+ if (!pcdev->active) {
+ /*
+		 * Because there was no active buffer at this moment,
+ * we are not interested in the return value of
+ * sh_mobile_ceu_capture here.
+ */
+ pcdev->active = vb;
+ sh_mobile_ceu_capture(pcdev);
+ }
+ spin_unlock_irq(&pcdev->lock);
+
+ return;
+
+error:
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+}
+
+static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+ spin_lock_irq(&pcdev->lock);
+
+ if (pcdev->active == vb) {
+ /* disable capture (release DMA buffer), reset */
+ ceu_write(pcdev, CAPSR, 1 << 16);
+ pcdev->active = NULL;
+ }
+
+ /*
+	 * It doesn't hurt if the list is empty, but it does hurt if queuing the
+	 * buffer failed and .buf_init() hasn't been called.
+ */
+ if (buf->queue.next)
+ list_del_init(&buf->queue);
+
+ pcdev->buf_total -= PAGE_ALIGN(vb2_plane_size(vb, 0));
+ dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
+ pcdev->buf_total);
+
+ spin_unlock_irq(&pcdev->lock);
+}
+
+static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+ pcdev->buf_total += PAGE_ALIGN(vb2_plane_size(vb, 0));
+ dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
+ pcdev->buf_total);
+
+ /* This is for locking debugging only */
+ INIT_LIST_HEAD(&to_ceu_vb(vb)->queue);
+ return 0;
+}
+
+static int sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
+{
+ struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct list_head *buf_head, *tmp;
+
+ spin_lock_irq(&pcdev->lock);
+
+ pcdev->active = NULL;
+
+ list_for_each_safe(buf_head, tmp, &pcdev->capture)
+ list_del_init(buf_head);
+
+ spin_unlock_irq(&pcdev->lock);
+
+ return sh_mobile_ceu_soft_reset(pcdev);
+}
+
+static struct vb2_ops sh_mobile_ceu_videobuf_ops = {
+ .queue_setup = sh_mobile_ceu_videobuf_setup,
+ .buf_prepare = sh_mobile_ceu_videobuf_prepare,
+ .buf_queue = sh_mobile_ceu_videobuf_queue,
+ .buf_cleanup = sh_mobile_ceu_videobuf_release,
+ .buf_init = sh_mobile_ceu_videobuf_init,
+ .wait_prepare = soc_camera_unlock,
+ .wait_finish = soc_camera_lock,
+ .stop_streaming = sh_mobile_ceu_stop_streaming,
+};
+
+static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
+{
+ struct sh_mobile_ceu_dev *pcdev = data;
+ struct vb2_buffer *vb;
+ int ret;
+
+ spin_lock(&pcdev->lock);
+
+ vb = pcdev->active;
+ if (!vb)
+ /* Stale interrupt from a released buffer */
+ goto out;
+
+ list_del_init(&to_ceu_vb(vb)->queue);
+
+ if (!list_empty(&pcdev->capture))
+ pcdev->active = &list_entry(pcdev->capture.next,
+ struct sh_mobile_ceu_buffer, queue)->vb;
+ else
+ pcdev->active = NULL;
+
+ ret = sh_mobile_ceu_capture(pcdev);
+ do_gettimeofday(&vb->v4l2_buf.timestamp);
+ if (!ret) {
+ vb->v4l2_buf.field = pcdev->field;
+ vb->v4l2_buf.sequence = pcdev->sequence++;
+ }
+ vb2_buffer_done(vb, ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+out:
+ spin_unlock(&pcdev->lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct v4l2_subdev *find_csi2(struct sh_mobile_ceu_dev *pcdev)
+{
+ struct v4l2_subdev *sd;
+
+ if (!pcdev->csi2_pdev)
+ return NULL;
+
+ v4l2_device_for_each_subdev(sd, &pcdev->ici.v4l2_dev)
+ if (&pcdev->csi2_pdev->dev == v4l2_get_subdevdata(sd))
+ return sd;
+
+ return NULL;
+}
+
+/* Called with .video_lock held */
+static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct v4l2_subdev *csi2_sd;
+ int ret;
+
+ if (pcdev->icd)
+ return -EBUSY;
+
+ dev_info(icd->parent,
+ "SuperH Mobile CEU driver attached to camera %d\n",
+ icd->devnum);
+
+ pm_runtime_get_sync(ici->v4l2_dev.dev);
+
+ pcdev->buf_total = 0;
+
+ ret = sh_mobile_ceu_soft_reset(pcdev);
+
+ csi2_sd = find_csi2(pcdev);
+ if (csi2_sd) {
+ csi2_sd->grp_id = soc_camera_grp_id(icd);
+ v4l2_set_subdev_hostdata(csi2_sd, icd);
+ }
+
+ ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
+ pm_runtime_put_sync(ici->v4l2_dev.dev);
+ return ret;
+ }
+
+ /*
+ * -ENODEV is special: either csi2_sd == NULL or the CSI-2 driver
+ * has not found this soc-camera device among its clients
+ */
+ if (ret == -ENODEV && csi2_sd)
+ csi2_sd->grp_id = 0;
+ pcdev->icd = icd;
+
+ return 0;
+}
+
+/* Called with .video_lock held */
+static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
+
+ BUG_ON(icd != pcdev->icd);
+
+ v4l2_subdev_call(csi2_sd, core, s_power, 0);
+ if (csi2_sd)
+ csi2_sd->grp_id = 0;
+ /* disable capture, disable interrupts */
+ ceu_write(pcdev, CEIER, 0);
+ sh_mobile_ceu_soft_reset(pcdev);
+
+ /* make sure active buffer is canceled */
+ spin_lock_irq(&pcdev->lock);
+ if (pcdev->active) {
+ list_del_init(&to_ceu_vb(pcdev->active)->queue);
+ vb2_buffer_done(pcdev->active, VB2_BUF_STATE_ERROR);
+ pcdev->active = NULL;
+ }
+ spin_unlock_irq(&pcdev->lock);
+
+ pm_runtime_put_sync(ici->v4l2_dev.dev);
+
+ dev_info(icd->parent,
+ "SuperH Mobile CEU driver detached from camera %d\n",
+ icd->devnum);
+
+ pcdev->icd = NULL;
+}
+
+/*
+ * See chapter 29.4.12 "Capture Filter Control Register (CFLCR)"
+ * in SH7722 Hardware Manual
+ */
+static unsigned int size_dst(unsigned int src, unsigned int scale)
+{
+ unsigned int mant_pre = scale >> 12;
+ if (!src || !scale)
+ return src;
+ return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 1) *
+ mant_pre * 4096 / scale + 1;
+}
+
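+/*
+ * The scale factors are src/dst ratios in 1/4096 units (so larger values
+ * scale the image down). calc_scale() starts from the generic ratio and,
+ * while the result of size_dst() is still smaller than requested, decreases
+ * the factor in steps of 8 (a summary of the code below, not of the hardware
+ * manual).
+ */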
+static u16 calc_scale(unsigned int src, unsigned int *dst)
+{
+ u16 scale;
+
+ if (src == *dst)
+ return 0;
+
+ scale = (src * 4096 / *dst) & ~7;
+
+ while (scale > 4096 && size_dst(src, scale) < *dst)
+ scale -= 8;
+
+ *dst = size_dst(src, scale);
+
+ return scale;
+}
+
+/* rect is guaranteed to not exceed the scaled camera rectangle */
+static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ unsigned int height, width, cdwdr_width, in_width, in_height;
+ unsigned int left_offset, top_offset;
+ u32 camor;
+
+ dev_geo(icd->parent, "Crop %ux%u@%u:%u\n",
+ icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);
+
+ left_offset = cam->ceu_left;
+ top_offset = cam->ceu_top;
+
+ WARN_ON(icd->user_width & 3 || icd->user_height & 3);
+
+ width = icd->user_width;
+
+ if (pcdev->image_mode) {
+ in_width = cam->width;
+ if (!pcdev->is_16bit) {
+ in_width *= 2;
+ left_offset *= 2;
+ }
+ } else {
+ unsigned int w_factor;
+
+ switch (icd->current_fmt->host_fmt->packing) {
+ case SOC_MBUS_PACKING_2X8_PADHI:
+ w_factor = 2;
+ break;
+ default:
+ w_factor = 1;
+ }
+
+ in_width = cam->width * w_factor;
+ left_offset *= w_factor;
+ }
+
+ cdwdr_width = icd->bytesperline;
+
+ height = icd->user_height;
+ in_height = cam->height;
+ if (V4L2_FIELD_NONE != pcdev->field) {
+ height = (height / 2) & ~3;
+ in_height /= 2;
+ top_offset /= 2;
+ cdwdr_width *= 2;
+ }
+
+ /* CSI2 special configuration */
+ if (pcdev->pdata->csi2) {
+ in_width = ((in_width - 2) * 2);
+ left_offset *= 2;
+ }
+
+ /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
+ camor = left_offset | (top_offset << 16);
+
+ dev_geo(icd->parent,
+ "CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
+ (in_height << 16) | in_width, (height << 16) | width,
+ cdwdr_width);
+
+ ceu_write(pcdev, CAMOR, camor);
+ ceu_write(pcdev, CAPWR, (in_height << 16) | in_width);
+ /* CFSZR clipping is applied _after_ the scaling filter (CFLCR) */
+ ceu_write(pcdev, CFSZR, (height << 16) | width);
+ ceu_write(pcdev, CDWDR, cdwdr_width);
+}
+
+static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev)
+{
+ u32 capsr = ceu_read(pcdev, CAPSR);
+ ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */
+ return capsr;
+}
+
+static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
+{
+ unsigned long timeout = jiffies + 10 * HZ;
+
+ /*
+ * Wait until the end of the current frame. It can take a long time,
+	 * but if it has been aborted by a CAPSR reset, it should exit sooner.
+ */
+ while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout))
+ msleep(1);
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(pcdev->ici.v4l2_dev.dev,
+ "Timeout waiting for frame end! Interface problem?\n");
+ return;
+ }
+
+ /* Wait until reset clears, this shall not hang... */
+ while (ceu_read(pcdev, CAPSR) & (1 << 16))
+ udelay(10);
+
+ /* Anything to restore? */
+ if (capsr & ~(1 << 16))
+ ceu_write(pcdev, CAPSR, capsr);
+}
+
+/* Find the bus subdevice driver, e.g., CSI2 */
+static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
+ struct soc_camera_device *icd)
+{
+ if (pcdev->csi2_pdev) {
+ struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
+ if (csi2_sd && csi2_sd->grp_id == soc_camera_grp_id(icd))
+ return csi2_sd;
+ }
+
+ return soc_camera_to_subdev(icd);
+}
+
+#define CEU_BUS_FLAGS (V4L2_MBUS_MASTER | \
+ V4L2_MBUS_PCLK_SAMPLE_RISING | \
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_HSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_DATA_ACTIVE_HIGH)
+
+/* Capture is not running, no interrupts, no locking needed */
+static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ unsigned long value, common_flags = CEU_BUS_FLAGS;
+ u32 capsr = capture_save_reset(pcdev);
+ unsigned int yuv_lineskip;
+ int ret;
+
+ /*
+ * If the client doesn't implement g_mbus_config, we just use our
+ * platform data
+ */
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg,
+ common_flags);
+ if (!common_flags)
+ return -EINVAL;
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ }
+
+	/* Make choices based on platform preferences */
+ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+ if (pcdev->pdata->flags & SH_CEU_FLAG_HSYNC_LOW)
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+ if (pcdev->pdata->flags & SH_CEU_FLAG_VSYNC_LOW)
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+ }
+
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (icd->current_fmt->host_fmt->bits_per_sample > 8)
+ pcdev->is_16bit = 1;
+ else
+ pcdev->is_16bit = 0;
+
+ ceu_write(pcdev, CRCNTR, 0);
+ ceu_write(pcdev, CRCMPR, 0);
+
+ value = 0x00000010; /* data fetch by default */
+ yuv_lineskip = 0x10;
+
+ switch (icd->current_fmt->host_fmt->fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ /* convert 4:2:2 -> 4:2:0 */
+ yuv_lineskip = 0; /* skip for NV12/21, no skip for NV16/61 */
+ /* fall-through */
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ switch (cam->code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
+ break;
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
+ break;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
+ break;
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
+ icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61)
+ value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */
+
+ value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
+ value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
+
+ if (pcdev->pdata->csi2) /* CSI2 mode */
+ value |= 3 << 12;
+ else if (pcdev->is_16bit)
+ value |= 1 << 12;
+ else if (pcdev->pdata->flags & SH_CEU_FLAG_LOWER_8BIT)
+ value |= 2 << 12;
+
+ ceu_write(pcdev, CAMCR, value);
+
+ ceu_write(pcdev, CAPCR, 0x00300000);
+
+ switch (pcdev->field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ value = 0x101;
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ value = 0x102;
+ break;
+ default:
+ value = 0;
+ break;
+ }
+ ceu_write(pcdev, CAIFR, value);
+
+ sh_mobile_ceu_set_rect(icd);
+ mdelay(1);
+
+ dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr);
+ ceu_write(pcdev, CFLCR, pcdev->cflcr);
+
+ /*
+ * A few words about byte order (observed in Big Endian mode)
+ *
+ * In data fetch mode bytes are received in chunks of 8 bytes.
+ * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
+ *
+ * The data is however by default written to memory in reverse order:
+ * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte)
+ *
+	 * The lowest three bits of CDOCR allow us to do swapping;
+ * using 7 we swap the data bytes to match the incoming order:
+ * D0, D1, D2, D3, D4, D5, D6, D7
+ */
+ value = 0x00000007 | yuv_lineskip;
+
+ ceu_write(pcdev, CDOCR, value);
+ ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */
+
+ capture_restore(pcdev, capsr);
+
+ /* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */
+ return 0;
+}
+
+static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
+ unsigned char buswidth)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
+ unsigned long common_flags = CEU_BUS_FLAGS;
+ struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ int ret;
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret)
+ common_flags = soc_mbus_config_compatible(&cfg,
+ common_flags);
+ else if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (!common_flags || buswidth > 16)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .name = "NV12",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_1_5X8,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .name = "NV21",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_1_5X8,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .name = "NV16",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .name = "NV61",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
+ },
+};
+
+/* This will be corrected as we get more formats */
+static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+{
+ return fmt->packing == SOC_MBUS_PACKING_NONE ||
+ (fmt->bits_per_sample == 8 &&
+ fmt->packing == SOC_MBUS_PACKING_1_5X8) ||
+ (fmt->bits_per_sample == 8 &&
+ fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+ (fmt->bits_per_sample > 8 &&
+ fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+}
+
+static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect);
+
+static struct soc_camera_device *ctrl_to_icd(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct soc_camera_device,
+ ctrl_handler);
+}
+
+static int sh_mobile_ceu_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct soc_camera_device *icd = ctrl_to_icd(ctrl);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+
+ switch (ctrl->id) {
+ case V4L2_CID_SHARPNESS:
+ switch (icd->current_fmt->host_fmt->fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
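+			/*
+			 * CLFCR is the capture low-pass filter control
+			 * register: enable the filter when sharpness is
+			 * switched off (an interpretation based on the
+			 * register name).
+			 */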
+ ceu_write(pcdev, CLFCR, !ctrl->val);
+ return 0;
+ }
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops sh_mobile_ceu_ctrl_ops = {
+ .s_ctrl = sh_mobile_ceu_s_ctrl,
+};
+
+static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx,
+ struct soc_camera_format_xlate *xlate)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ int ret, k, n;
+ int formats = 0;
+ struct sh_mobile_ceu_cam *cam;
+ enum v4l2_mbus_pixelcode code;
+ const struct soc_mbus_pixelfmt *fmt;
+
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_warn(dev, "unsupported format code #%u: %d\n", idx, code);
+ return 0;
+ }
+
+ if (!pcdev->pdata->csi2) {
+ /* Are there any restrictions in the CSI-2 case? */
+ ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
+ if (ret < 0)
+ return 0;
+ }
+
+ if (!icd->host_priv) {
+ struct v4l2_mbus_framefmt mf;
+ struct v4l2_rect rect;
+ int shift = 0;
+
+ /* Add our control */
+ v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 1, 1, 0);
+ if (icd->ctrl_handler.error)
+ return icd->ctrl_handler.error;
+
+ /* FIXME: subwindow is lost between close / open */
+
+ /* Cache current client geometry */
+ ret = client_g_rect(sd, &rect);
+ if (ret < 0)
+ return ret;
+
+ /* First time */
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * All currently existing CEU implementations support 2560x1920
+ * or larger frames. If the sensor is proposing too big a frame,
+		 * don't bother with larger sizes possibly supported by the
+		 * CEU, just try VGA multiples. If needed, this can be
+ * adjusted in the future.
+ */
+ while ((mf.width > pcdev->max_width ||
+ mf.height > pcdev->max_height) && shift < 4) {
+ /* Try 2560x1920, 1280x960, 640x480, 320x240 */
+ mf.width = 2560 >> shift;
+ mf.height = 1920 >> shift;
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+ shift++;
+ }
+
+ if (shift == 4) {
+			dev_err(dev, "Failed to configure the client below %ux%u\n",
+ mf.width, mf.height);
+ return -EIO;
+ }
+
+ dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height);
+
+ cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+ if (!cam)
+ return -ENOMEM;
+
+ /* We are called with current camera crop, initialise subrect with it */
+ cam->rect = rect;
+ cam->subrect = rect;
+
+ cam->width = mf.width;
+ cam->height = mf.height;
+
+ icd->host_priv = cam;
+ } else {
+ cam = icd->host_priv;
+ }
+
+ /* Beginning of a pass */
+ if (!idx)
+ cam->extra_fmt = NULL;
+
+ switch (code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ if (cam->extra_fmt)
+ break;
+
+ /*
+ * Our case is simple so far: for any of the above four camera
+ * formats we add all our four synthesized NV* formats, so,
+ * just marking the device with a single flag suffices. If
+ * the format generation rules are more complex, you would have
+ * to actually hang your already added / counted formats onto
+ * the host_priv pointer and check whether the format you're
+ * going to add now is already there.
+ */
+ cam->extra_fmt = sh_mobile_ceu_formats;
+
+ n = ARRAY_SIZE(sh_mobile_ceu_formats);
+ formats += n;
+ for (k = 0; xlate && k < n; k++) {
+ xlate->host_fmt = &sh_mobile_ceu_formats[k];
+ xlate->code = code;
+ xlate++;
+ dev_dbg(dev, "Providing format %s using code %d\n",
+ sh_mobile_ceu_formats[k].name, code);
+ }
+ break;
+ default:
+ if (!sh_mobile_ceu_packing_supported(fmt))
+ return 0;
+ }
+
+ /* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
+ dev_dbg(dev, "Providing format %s in pass-through mode\n",
+ fmt->name);
+ }
+
+ return formats;
+}
+
+static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd)
+{
+ kfree(icd->host_priv);
+ icd->host_priv = NULL;
+}
+
+/* Check if any dimension of r1 is smaller than respective one of r2 */
+static bool is_smaller(struct v4l2_rect *r1, struct v4l2_rect *r2)
+{
+ return r1->width < r2->width || r1->height < r2->height;
+}
+
+/* Check if r1 fails to cover r2 */
+static bool is_inside(struct v4l2_rect *r1, struct v4l2_rect *r2)
+{
+ return r1->left > r2->left || r1->top > r2->top ||
+ r1->left + r1->width < r2->left + r2->width ||
+ r1->top + r1->height < r2->top + r2->height;
+}
+
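+/*
+ * Generic fixed-point helpers in 1/4096 units: calc_generic_scale() returns
+ * the input/output ratio (4096 == 1:1), scale_down() applies such a factor
+ * with rounding. They are used below to map rectangles between the camera
+ * input, the camera output and the CEU window.
+ */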
+static unsigned int scale_down(unsigned int size, unsigned int scale)
+{
+ return (size * 4096 + scale / 2) / scale;
+}
+
+static unsigned int calc_generic_scale(unsigned int input, unsigned int output)
+{
+ return (input * 4096 + output / 2) / output;
+}
+
+/* Get and store current client crop */
+static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
+{
+ struct v4l2_crop crop;
+ struct v4l2_cropcap cap;
+ int ret;
+
+ crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ ret = v4l2_subdev_call(sd, video, g_crop, &crop);
+ if (!ret) {
+ *rect = crop.c;
+ return ret;
+ }
+
+ /* Camera driver doesn't support .g_crop(), assume default rectangle */
+ cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ ret = v4l2_subdev_call(sd, video, cropcap, &cap);
+ if (!ret)
+ *rect = cap.defrect;
+
+ return ret;
+}
+
+/* Client crop has changed, update our sub-rectangle to remain within the area */
+static void update_subrect(struct sh_mobile_ceu_cam *cam)
+{
+ struct v4l2_rect *rect = &cam->rect, *subrect = &cam->subrect;
+
+ if (rect->width < subrect->width)
+ subrect->width = rect->width;
+
+ if (rect->height < subrect->height)
+ subrect->height = rect->height;
+
+ if (rect->left > subrect->left)
+ subrect->left = rect->left;
+ else if (rect->left + rect->width >
+ subrect->left + subrect->width)
+ subrect->left = rect->left + rect->width -
+ subrect->width;
+
+ if (rect->top > subrect->top)
+ subrect->top = rect->top;
+ else if (rect->top + rect->height >
+ subrect->top + subrect->height)
+ subrect->top = rect->top + rect->height -
+ subrect->height;
+}
+
+/*
+ * The iterative approach common to both scaling and cropping is:
+ * 1. try if the client can produce exactly what the user requested
+ * 2. if (1) failed, try to double the client image until we get one big enough
+ * 3. if (2) failed, try to request the maximum image
+ */
+static int client_s_crop(struct soc_camera_device *icd, struct v4l2_crop *crop,
+ const struct v4l2_crop *cam_crop)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c;
+ struct device *dev = sd->v4l2_dev->dev;
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct v4l2_cropcap cap;
+ int ret;
+ unsigned int width, height;
+
+ v4l2_subdev_call(sd, video, s_crop, crop);
+ ret = client_g_rect(sd, cam_rect);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Now cam_crop contains the current camera input rectangle, and it must
+ * be within camera cropcap bounds
+ */
+ if (!memcmp(rect, cam_rect, sizeof(*rect))) {
+		/* Even if camera S_CROP failed, the camera rectangle matches */
+ dev_dbg(dev, "Camera S_CROP successful for %dx%d@%d:%d\n",
+ rect->width, rect->height, rect->left, rect->top);
+ cam->rect = *cam_rect;
+ return 0;
+ }
+
+	/* Try to fix the cropping that the camera hasn't managed to set */
+ dev_geo(dev, "Fix camera S_CROP for %dx%d@%d:%d to %dx%d@%d:%d\n",
+ cam_rect->width, cam_rect->height,
+ cam_rect->left, cam_rect->top,
+ rect->width, rect->height, rect->left, rect->top);
+
+ /* We need sensor maximum rectangle */
+ ret = v4l2_subdev_call(sd, video, cropcap, &cap);
+ if (ret < 0)
+ return ret;
+
+ /* Put user requested rectangle within sensor bounds */
+ soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2,
+ cap.bounds.width);
+ soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4,
+ cap.bounds.height);
+
+ /*
+ * Popular special case - some cameras can only handle fixed sizes like
+	 * QVGA, VGA,... Take care to avoid an infinite loop.
+ */
+ width = max(cam_rect->width, 2);
+ height = max(cam_rect->height, 2);
+
+ /*
+	 * Loop as long as the sensor does not cover the requested rectangle
+	 * and is still within its bounds.
+ */
+ while (!ret && (is_smaller(cam_rect, rect) ||
+ is_inside(cam_rect, rect)) &&
+ (cap.bounds.width > width || cap.bounds.height > height)) {
+
+ width *= 2;
+ height *= 2;
+
+ cam_rect->width = width;
+ cam_rect->height = height;
+
+ /*
+ * We do not know what capabilities the camera has to set up
+ * left and top borders. We could try to be smarter in iterating
+ * them, e.g., if camera current left is to the right of the
+ * target left, set it to the middle point between the current
+ * left and minimum left. But that would add too much
+ * complexity: we would have to iterate each border separately.
+ * Instead we just drop to the left and top bounds.
+ */
+ if (cam_rect->left > rect->left)
+ cam_rect->left = cap.bounds.left;
+
+ if (cam_rect->left + cam_rect->width < rect->left + rect->width)
+ cam_rect->width = rect->left + rect->width -
+ cam_rect->left;
+
+ if (cam_rect->top > rect->top)
+ cam_rect->top = cap.bounds.top;
+
+ if (cam_rect->top + cam_rect->height < rect->top + rect->height)
+ cam_rect->height = rect->top + rect->height -
+ cam_rect->top;
+
+ v4l2_subdev_call(sd, video, s_crop, cam_crop);
+ ret = client_g_rect(sd, cam_rect);
+ dev_geo(dev, "Camera S_CROP %d for %dx%d@%d:%d\n", ret,
+ cam_rect->width, cam_rect->height,
+ cam_rect->left, cam_rect->top);
+ }
+
+ /* S_CROP must not modify the rectangle */
+ if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) {
+ /*
+ * The camera failed to configure a suitable cropping,
+ * we cannot use the current rectangle, set to max
+ */
+ *cam_rect = cap.bounds;
+ v4l2_subdev_call(sd, video, s_crop, cam_crop);
+ ret = client_g_rect(sd, cam_rect);
+ dev_geo(dev, "Camera S_CROP %d for max %dx%d@%d:%d\n", ret,
+ cam_rect->width, cam_rect->height,
+ cam_rect->left, cam_rect->top);
+ }
+
+ if (!ret) {
+ cam->rect = *cam_rect;
+ update_subrect(cam);
+ }
+
+ return ret;
+}
+
+/* Iterative s_mbus_fmt, also updates cached client crop on success */
+static int client_s_fmt(struct soc_camera_device *icd,
+ struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->parent;
+ unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
+ unsigned int max_width, max_height;
+ struct v4l2_cropcap cap;
+ bool ceu_1to1;
+ int ret;
+
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, mf);
+ if (ret < 0)
+ return ret;
+
+ dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
+
+ if (width == mf->width && height == mf->height) {
+ /* Perfect! The client has done it all. */
+ ceu_1to1 = true;
+ goto update_cache;
+ }
+
+ ceu_1to1 = false;
+ if (!ceu_can_scale)
+ goto update_cache;
+
+ cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ ret = v4l2_subdev_call(sd, video, cropcap, &cap);
+ if (ret < 0)
+ return ret;
+
+ max_width = min(cap.bounds.width, pcdev->max_width);
+ max_height = min(cap.bounds.height, pcdev->max_height);
+
+ /* Camera set a format, but geometry is not precise, try to improve */
+ tmp_w = mf->width;
+ tmp_h = mf->height;
+
+ /* width <= max_width && height <= max_height - guaranteed by try_fmt */
+ while ((width > tmp_w || height > tmp_h) &&
+ tmp_w < max_width && tmp_h < max_height) {
+ tmp_w = min(2 * tmp_w, max_width);
+ tmp_h = min(2 * tmp_h, max_height);
+ mf->width = tmp_w;
+ mf->height = tmp_h;
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, mf);
+ dev_geo(dev, "Camera scaled to %ux%u\n",
+ mf->width, mf->height);
+ if (ret < 0) {
+ /* This shouldn't happen */
+ dev_err(dev, "Client failed to set format: %d\n", ret);
+ return ret;
+ }
+ }
+
+update_cache:
+ /* Update cache */
+ ret = client_g_rect(sd, &cam->rect);
+ if (ret < 0)
+ return ret;
+
+ if (ceu_1to1)
+ cam->subrect = cam->rect;
+ else
+ update_subrect(cam);
+
+ return 0;
+}
+
+/**
+ * @width - on output: user width, mapped back to input
+ * @height - on output: user height, mapped back to input
+ * @mf - in- / output camera output window
+ */
+static int client_scale(struct soc_camera_device *icd,
+ struct v4l2_mbus_framefmt *mf,
+ unsigned int *width, unsigned int *height,
+ bool ceu_can_scale)
+{
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct device *dev = icd->parent;
+ struct v4l2_mbus_framefmt mf_tmp = *mf;
+ unsigned int scale_h, scale_v;
+ int ret;
+
+ /*
+ * 5. Apply iterative camera S_FMT for camera user window (also updates
+ * client crop cache and the imaginary sub-rectangle).
+ */
+ ret = client_s_fmt(icd, &mf_tmp, ceu_can_scale);
+ if (ret < 0)
+ return ret;
+
+ dev_geo(dev, "5: camera scaled to %ux%u\n",
+ mf_tmp.width, mf_tmp.height);
+
+ /* 6. Retrieve camera output window (g_fmt) */
+
+ /* unneeded - it is already in "mf_tmp" */
+
+ /* 7. Calculate new client scales. */
+ scale_h = calc_generic_scale(cam->rect.width, mf_tmp.width);
+ scale_v = calc_generic_scale(cam->rect.height, mf_tmp.height);
+
+ mf->width = mf_tmp.width;
+ mf->height = mf_tmp.height;
+ mf->colorspace = mf_tmp.colorspace;
+
+ /*
+ * 8. Calculate new CEU crop - apply camera scales to previously
+ * updated "effective" crop.
+ */
+ *width = scale_down(cam->subrect.width, scale_h);
+ *height = scale_down(cam->subrect.height, scale_v);
+
+ dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height);
+
+ return 0;
+}
+
+/*
+ * CEU can scale and crop, but we don't want to waste bandwidth and kill the
+ * framerate by always requesting the maximum image from the client. See
+ * Documentation/video4linux/sh_mobile_ceu_camera.txt for a description of
+ * scaling and cropping algorithms and for the meaning of the steps referenced here.
+ */
+static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
+ const struct v4l2_crop *a)
+{
+ const struct v4l2_rect *rect = &a->c;
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct v4l2_crop cam_crop;
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct v4l2_rect *cam_rect = &cam_crop.c;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_mbus_framefmt mf;
+ unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v,
+ out_width, out_height;
+ int interm_width, interm_height;
+ u32 capsr, cflcr;
+ int ret;
+
+ dev_geo(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height,
+ rect->left, rect->top);
+
+ /* During camera cropping its output window can change too, stop CEU */
+ capsr = capture_save_reset(pcdev);
+ dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
+
+ /*
+ * 1. - 2. Apply iterative camera S_CROP for new input window, read back
+ * actual camera rectangle.
+ */
+ ret = client_s_crop(icd, a, &cam_crop);
+ if (ret < 0)
+ return ret;
+
+ dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n",
+ cam_rect->width, cam_rect->height,
+ cam_rect->left, cam_rect->top);
+
+ /* On success cam_crop contains current camera crop */
+
+ /* 3. Retrieve camera output window */
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ if (mf.width > pcdev->max_width || mf.height > pcdev->max_height)
+ return -EINVAL;
+
+ /* 4. Calculate camera scales */
+ scale_cam_h = calc_generic_scale(cam_rect->width, mf.width);
+ scale_cam_v = calc_generic_scale(cam_rect->height, mf.height);
+
+ /* Calculate intermediate window */
+ interm_width = scale_down(rect->width, scale_cam_h);
+ interm_height = scale_down(rect->height, scale_cam_v);
+
+ if (interm_width < icd->user_width) {
+ u32 new_scale_h;
+
+ new_scale_h = calc_generic_scale(rect->width, icd->user_width);
+
+ mf.width = scale_down(cam_rect->width, new_scale_h);
+ }
+
+ if (interm_height < icd->user_height) {
+ u32 new_scale_v;
+
+ new_scale_v = calc_generic_scale(rect->height, icd->user_height);
+
+ mf.height = scale_down(cam_rect->height, new_scale_v);
+ }
+
+ if (interm_width < icd->user_width || interm_height < icd->user_height) {
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ dev_geo(dev, "New camera output %ux%u\n", mf.width, mf.height);
+ scale_cam_h = calc_generic_scale(cam_rect->width, mf.width);
+ scale_cam_v = calc_generic_scale(cam_rect->height, mf.height);
+ interm_width = scale_down(rect->width, scale_cam_h);
+ interm_height = scale_down(rect->height, scale_cam_v);
+ }
+
+ /* Cache camera output window */
+ cam->width = mf.width;
+ cam->height = mf.height;
+
+ if (pcdev->image_mode) {
+ out_width = min(interm_width, icd->user_width);
+ out_height = min(interm_height, icd->user_height);
+ } else {
+ out_width = interm_width;
+ out_height = interm_height;
+ }
+
+ /*
+ * 5. Calculate CEU scales from the intermediate window, obtained from the
+ * camera scales above, and the user window
+ */
+ scale_ceu_h = calc_scale(interm_width, &out_width);
+ scale_ceu_v = calc_scale(interm_height, &out_height);
+
+ dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);
+
+ /* Apply CEU scales. */
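+ /*
+ * CFLCR takes the horizontal ratio in its low half-word and the vertical
+ * ratio in the high half-word; only rewrite the register when the value
+ * actually changes.
+ */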
+ cflcr = scale_ceu_h | (scale_ceu_v << 16);
+ if (cflcr != pcdev->cflcr) {
+ pcdev->cflcr = cflcr;
+ ceu_write(pcdev, CFLCR, cflcr);
+ }
+
+ icd->user_width = out_width & ~3;
+ icd->user_height = out_height & ~3;
+ /* Offsets are applied at the CEU scaling filter input */
+ cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1;
+ cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1;
+
+ /* 6. Use CEU cropping to crop to the new window. */
+ sh_mobile_ceu_set_rect(icd);
+
+ cam->subrect = *rect;
+
+ dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n",
+ icd->user_width, icd->user_height,
+ cam->ceu_left, cam->ceu_top);
+
+ /* Restore capture. The CE bit can be cleared by the hardware */
+ if (pcdev->active)
+ capsr |= 1;
+ capture_restore(pcdev, capsr);
+
+ /* Even if only camera cropping succeeded */
+ return ret;
+}
+
+static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd,
+ struct v4l2_crop *a)
+{
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ a->c = cam->subrect;
+
+ return 0;
+}
+
+/*
+ * Calculate the real client output window by applying the new scales to the
+ * current client crop. New scales are calculated from the requested output
+ * format and the CEU crop, mapped back onto the client input (subrect).
+ */
+static void calculate_client_output(struct soc_camera_device *icd,
+ const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf)
+{
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct device *dev = icd->parent;
+ struct v4l2_rect *cam_subrect = &cam->subrect;
+ unsigned int scale_v, scale_h;
+
+ if (cam_subrect->width == cam->rect.width &&
+ cam_subrect->height == cam->rect.height) {
+ /* No sub-cropping */
+ mf->width = pix->width;
+ mf->height = pix->height;
+ return;
+ }
+
+ /* 1.-2. Current camera scales and subwin - cached. */
+
+ dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
+ cam_subrect->width, cam_subrect->height,
+ cam_subrect->left, cam_subrect->top);
+
+ /*
+ * 3. Calculate new combined scales from input sub-window to requested
+ * user window.
+ */
+
+ /*
+ * TODO: CEU cannot scale images larger than VGA to smaller than SubQCIF
+ * (128x96) or larger than VGA
+ */
+ scale_h = calc_generic_scale(cam_subrect->width, pix->width);
+ scale_v = calc_generic_scale(cam_subrect->height, pix->height);
+
+ dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);
+
+ /*
+ * 4. Calculate desired client output window by applying combined scales
+ * to client (real) input window.
+ */
+ mf->width = scale_down(cam->rect.width, scale_h);
+ mf->height = scale_down(cam->rect.height, scale_v);
+}
+
+/* Similar to the multistage iterative algorithm in set_crop */
+static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ __u32 pixfmt = pix->pixelformat;
+ const struct soc_camera_format_xlate *xlate;
+ /* Keep the compiler happy */
+ unsigned int ceu_sub_width = 0, ceu_sub_height = 0;
+ u16 scale_v, scale_h;
+ int ret;
+ bool image_mode;
+ enum v4l2_field field;
+
+ switch (pix->field) {
+ default:
+ pix->field = V4L2_FIELD_NONE;
+ /* fall-through */
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_NONE:
+ field = pix->field;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ field = V4L2_FIELD_INTERLACED_TB;
+ break;
+ }
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (!xlate) {
+ dev_warn(dev, "Format %x not found\n", pixfmt);
+ return -EINVAL;
+ }
+
+ /* 1.-4. Calculate desired client output geometry */
+ calculate_client_output(icd, pix, &mf);
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ image_mode = true;
+ break;
+ default:
+ image_mode = false;
+ }
+
+ dev_geo(dev, "S_FMT(pix=0x%x, fld 0x%x, code 0x%x, %ux%u)\n", pixfmt, mf.field, mf.code,
+ pix->width, pix->height);
+
+ dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height);
+
+ /* 5. - 9. */
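+ /*
+ * The CEU can only scale in image mode and with progressive data, hence
+ * the last argument.
+ */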
+ ret = client_scale(icd, &mf, &ceu_sub_width, &ceu_sub_height,
+ image_mode && V4L2_FIELD_NONE == field);
+
+ dev_geo(dev, "5-9: client scale return %d\n", ret);
+
+ /* Done with the camera. Now see if we can improve the result */
+
+ dev_geo(dev, "fmt %ux%u, requested %ux%u\n",
+ mf.width, mf.height, pix->width, pix->height);
+ if (ret < 0)
+ return ret;
+
+ if (mf.code != xlate->code)
+ return -EINVAL;
+
+ /* 9. Prepare CEU crop */
+ cam->width = mf.width;
+ cam->height = mf.height;
+
+ /* 10. Use CEU scaling to scale to the requested user window. */
+
+ /* We cannot scale up */
+ if (pix->width > ceu_sub_width)
+ ceu_sub_width = pix->width;
+
+ if (pix->height > ceu_sub_height)
+ ceu_sub_height = pix->height;
+
+ pix->colorspace = mf.colorspace;
+
+ if (image_mode) {
+ /* Scale pix->{width x height} down to width x height */
+ scale_h = calc_scale(ceu_sub_width, &pix->width);
+ scale_v = calc_scale(ceu_sub_height, &pix->height);
+ } else {
+ pix->width = ceu_sub_width;
+ pix->height = ceu_sub_height;
+ scale_h = 0;
+ scale_v = 0;
+ }
+
+ pcdev->cflcr = scale_h | (scale_v << 16);
+
+ /*
+ * We have calculated CFLCR, the actual configuration will be performed
+ * in sh_mobile_ceu_set_bus_param()
+ */
+
+ dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
+ ceu_sub_width, scale_h, pix->width,
+ ceu_sub_height, scale_v, pix->height);
+
+ cam->code = xlate->code;
+ icd->current_fmt = xlate;
+
+ pcdev->field = field;
+ pcdev->image_mode = image_mode;
+
+ /* CFSZR requirement */
+ pix->width &= ~3;
+ pix->height &= ~3;
+
+ return 0;
+}
+
+#define CEU_CHDW_MAX 8188U /* Maximum line stride */
+
+static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_mbus_framefmt mf;
+ __u32 pixfmt = pix->pixelformat;
+ int width, height;
+ int ret;
+
+ dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n",
+ pixfmt, pix->width, pix->height);
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (!xlate) {
+ xlate = icd->current_fmt;
+ dev_dbg(icd->parent, "Format %x not found, keeping %x\n",
+ pixfmt, xlate->host_fmt->fourcc);
+ pixfmt = xlate->host_fmt->fourcc;
+ pix->pixelformat = pixfmt;
+ pix->colorspace = icd->colorspace;
+ }
+
+ /* FIXME: calculate using depth and bus width */
+
+ /* CFSZR requires height and width to be 4-pixel aligned */
+ v4l_bound_align_image(&pix->width, 2, pcdev->max_width, 2,
+ &pix->height, 4, pcdev->max_height, 2, 0);
+
+ width = pix->width;
+ height = pix->height;
+
+ /* limit to sensor capabilities */
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.code = xlate->code;
+ mf.colorspace = pix->colorspace;
+
+ ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+ video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ /* FIXME: check against rect_max after converting soc-camera */
+ /* We can scale precisely, need a bigger image from camera */
+ if (pix->width < width || pix->height < height) {
+ /*
+ * We presume the sensor behaves sanely, i.e., if we
+ * request a bigger rectangle, it will not return a
+ * smaller one.
+ */
+ mf.width = pcdev->max_width;
+ mf.height = pcdev->max_height;
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ try_mbus_fmt, &mf);
+ if (ret < 0) {
+ /* Shouldn't actually happen... */
+ dev_err(icd->parent,
+ "FIXME: client try_fmt() = %d\n", ret);
+ return ret;
+ }
+ }
+ /* We will scale exactly */
+ if (mf.width > width)
+ pix->width = width;
+ if (mf.height > height)
+ pix->height = height;
+
+ pix->bytesperline = max(pix->bytesperline, pix->width);
+ pix->bytesperline = min(pix->bytesperline, CEU_CHDW_MAX);
+ pix->bytesperline &= ~3;
+ break;
+
+ default:
+ /* Configurable stride isn't supported in pass-through mode. */
+ pix->bytesperline = 0;
+ }
+
+ pix->width &= ~3;
+ pix->height &= ~3;
+ pix->sizeimage = 0;
+
+ dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
+ __func__, ret, pix->pixelformat, pix->width, pix->height);
+
+ return ret;
+}
+
+static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
+ struct v4l2_crop *a)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ u32 out_width = icd->user_width, out_height = icd->user_height;
+ int ret;
+
+ /* Freeze queue */
+ pcdev->frozen = 1;
+ /* Wait for frame */
+ ret = wait_for_completion_interruptible(&pcdev->complete);
+ /* Stop the client */
+ ret = v4l2_subdev_call(sd, video, s_stream, 0);
+ if (ret < 0)
+ dev_warn(icd->parent,
+ "Client failed to stop the stream: %d\n", ret);
+ else
+ /* Do the crop, if it fails, there's nothing more we can do */
+ sh_mobile_ceu_set_crop(icd, a);
+
+ dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height);
+
+ if (icd->user_width != out_width || icd->user_height != out_height) {
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = out_width,
+ .height = out_height,
+ .pixelformat = icd->current_fmt->host_fmt->fourcc,
+ .field = pcdev->field,
+ .colorspace = icd->colorspace,
+ },
+ };
+ ret = sh_mobile_ceu_set_fmt(icd, &f);
+ if (!ret && (out_width != f.fmt.pix.width ||
+ out_height != f.fmt.pix.height))
+ ret = -EINVAL;
+ if (!ret) {
+ icd->user_width = out_width & ~3;
+ icd->user_height = out_height & ~3;
+ ret = sh_mobile_ceu_set_bus_param(icd);
+ }
+ }
+
+ /* Thaw the queue */
+ pcdev->frozen = 0;
+ spin_lock_irq(&pcdev->lock);
+ sh_mobile_ceu_capture(pcdev);
+ spin_unlock_irq(&pcdev->lock);
+ /* Start the client */
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ return ret;
+}
+
+static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ return vb2_poll(&icd->vb2_vidq, file, pt);
+}
+
+static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
+ struct soc_camera_device *icd)
+{
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = icd;
+ q->ops = &sh_mobile_ceu_videobuf_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer);
+
+ return vb2_queue_init(q);
+}
+
+static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
+ .owner = THIS_MODULE,
+ .add = sh_mobile_ceu_add_device,
+ .remove = sh_mobile_ceu_remove_device,
+ .get_formats = sh_mobile_ceu_get_formats,
+ .put_formats = sh_mobile_ceu_put_formats,
+ .get_crop = sh_mobile_ceu_get_crop,
+ .set_crop = sh_mobile_ceu_set_crop,
+ .set_livecrop = sh_mobile_ceu_set_livecrop,
+ .set_fmt = sh_mobile_ceu_set_fmt,
+ .try_fmt = sh_mobile_ceu_try_fmt,
+ .poll = sh_mobile_ceu_poll,
+ .querycap = sh_mobile_ceu_querycap,
+ .set_bus_param = sh_mobile_ceu_set_bus_param,
+ .init_videobuf2 = sh_mobile_ceu_init_videobuf,
+};
+
+struct bus_wait {
+ struct notifier_block notifier;
+ struct completion completion;
+ struct device *dev;
+};
+
+static int bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct bus_wait *wait = container_of(nb, struct bus_wait, notifier);
+
+ if (wait->dev != dev)
+ return NOTIFY_DONE;
+
+ switch (action) {
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ /* Protect from module unloading */
+ wait_for_completion(&wait->completion);
+ return NOTIFY_OK;
+ }
+ return NOTIFY_DONE;
+}
+
+static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
+{
+ struct sh_mobile_ceu_dev *pcdev;
+ struct resource *res;
+ void __iomem *base;
+ unsigned int irq;
+ int err = 0;
+ struct bus_wait wait = {
+ .completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion),
+ .notifier.notifier_call = bus_notify,
+ };
+ struct sh_mobile_ceu_companion *csi2;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || (int)irq <= 0) {
+ dev_err(&pdev->dev, "Not enough CEU platform resources.\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
+ if (!pcdev) {
+ dev_err(&pdev->dev, "Could not allocate pcdev\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ INIT_LIST_HEAD(&pcdev->capture);
+ spin_lock_init(&pcdev->lock);
+ init_completion(&pcdev->complete);
+
+ pcdev->pdata = pdev->dev.platform_data;
+ if (!pcdev->pdata) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "CEU platform data not set.\n");
+ goto exit_kfree;
+ }
+
+ pcdev->max_width = pcdev->pdata->max_width ? : 2560;
+ pcdev->max_height = pcdev->pdata->max_height ? : 1920;
+
+ base = ioremap_nocache(res->start, resource_size(res));
+ if (!base) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "Unable to ioremap CEU registers.\n");
+ goto exit_kfree;
+ }
+
+ pcdev->irq = irq;
+ pcdev->base = base;
+ pcdev->video_limit = 0; /* only enabled if second resource exists */
+
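+ /*
+ * An optional second memory resource provides a dedicated contiguous
+ * region for video buffers; its size becomes the video_limit of this host.
+ */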
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ err = dma_declare_coherent_memory(&pdev->dev, res->start,
+ res->start,
+ resource_size(res),
+ DMA_MEMORY_MAP |
+ DMA_MEMORY_EXCLUSIVE);
+ if (!err) {
+ dev_err(&pdev->dev, "Unable to declare CEU memory.\n");
+ err = -ENXIO;
+ goto exit_iounmap;
+ }
+
+ pcdev->video_limit = resource_size(res);
+ }
+
+ /* request irq */
+ err = request_irq(pcdev->irq, sh_mobile_ceu_irq, IRQF_DISABLED,
+ dev_name(&pdev->dev), pcdev);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to register CEU interrupt.\n");
+ goto exit_release_mem;
+ }
+
+ pm_suspend_ignore_children(&pdev->dev, true);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ pcdev->ici.priv = pcdev;
+ pcdev->ici.v4l2_dev.dev = &pdev->dev;
+ pcdev->ici.nr = pdev->id;
+ pcdev->ici.drv_name = dev_name(&pdev->dev);
+ pcdev->ici.ops = &sh_mobile_ceu_host_ops;
+ pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE;
+
+ pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(pcdev->alloc_ctx)) {
+ err = PTR_ERR(pcdev->alloc_ctx);
+ goto exit_free_clk;
+ }
+
+ err = soc_camera_host_register(&pcdev->ici);
+ if (err)
+ goto exit_free_ctx;
+
+ /* CSI2 interfacing */
+ csi2 = pcdev->pdata->csi2;
+ if (csi2) {
+ struct platform_device *csi2_pdev =
+ platform_device_alloc("sh-mobile-csi2", csi2->id);
+ struct sh_csi2_pdata *csi2_pdata = csi2->platform_data;
+
+ if (!csi2_pdev) {
+ err = -ENOMEM;
+ goto exit_host_unregister;
+ }
+
+ pcdev->csi2_pdev = csi2_pdev;
+
+ err = platform_device_add_data(csi2_pdev, csi2_pdata, sizeof(*csi2_pdata));
+ if (err < 0)
+ goto exit_pdev_put;
+
+ csi2_pdata = csi2_pdev->dev.platform_data;
+ csi2_pdata->v4l2_dev = &pcdev->ici.v4l2_dev;
+
+ csi2_pdev->resource = csi2->resource;
+ csi2_pdev->num_resources = csi2->num_resources;
+
+ err = platform_device_add(csi2_pdev);
+ if (err < 0)
+ goto exit_pdev_put;
+
+ wait.dev = &csi2_pdev->dev;
+
+ err = bus_register_notifier(&platform_bus_type, &wait.notifier);
+ if (err < 0)
+ goto exit_pdev_unregister;
+
+ /*
+ * From this point the driver module will not unload, until
+ * we complete the completion.
+ */
+
+ if (!csi2_pdev->dev.driver) {
+ complete(&wait.completion);
+ /* Either too late, or probing failed */
+ bus_unregister_notifier(&platform_bus_type, &wait.notifier);
+ err = -ENXIO;
+ goto exit_pdev_unregister;
+ }
+
+ /*
+ * The module is still loaded, in the worst case it is hanging
+ * in device release on our completion. So, _now_ dereferencing
+ * the "owner" is safe!
+ */
+
+ err = try_module_get(csi2_pdev->dev.driver->owner);
+
+ /* Let notifier complete, if it has been locked */
+ complete(&wait.completion);
+ bus_unregister_notifier(&platform_bus_type, &wait.notifier);
+ if (!err) {
+ err = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+ }
+
+ return 0;
+
+exit_pdev_unregister:
+ platform_device_del(pcdev->csi2_pdev);
+exit_pdev_put:
+ pcdev->csi2_pdev->resource = NULL;
+ platform_device_put(pcdev->csi2_pdev);
+exit_host_unregister:
+ soc_camera_host_unregister(&pcdev->ici);
+exit_free_ctx:
+ vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+exit_free_clk:
+ pm_runtime_disable(&pdev->dev);
+ free_irq(pcdev->irq, pcdev);
+exit_release_mem:
+ if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
+ dma_release_declared_memory(&pdev->dev);
+exit_iounmap:
+ iounmap(base);
+exit_kfree:
+ kfree(pcdev);
+exit:
+ return err;
+}
+
+static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct sh_mobile_ceu_dev *pcdev = container_of(soc_host,
+ struct sh_mobile_ceu_dev, ici);
+ struct platform_device *csi2_pdev = pcdev->csi2_pdev;
+
+ soc_camera_host_unregister(soc_host);
+ pm_runtime_disable(&pdev->dev);
+ free_irq(pcdev->irq, pcdev);
+ if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
+ dma_release_declared_memory(&pdev->dev);
+ iounmap(pcdev->base);
+ vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
+ if (csi2_pdev && csi2_pdev->dev.driver) {
+ struct module *csi2_drv = csi2_pdev->dev.driver->owner;
+ platform_device_del(csi2_pdev);
+ csi2_pdev->resource = NULL;
+ platform_device_put(csi2_pdev);
+ module_put(csi2_drv);
+ }
+ kfree(pcdev);
+
+ return 0;
+}
+
+static int sh_mobile_ceu_runtime_nop(struct device *dev)
+{
+ /* Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * This driver re-initializes all registers after
+ * pm_runtime_get_sync() anyway so there is no need
+ * to save and restore registers here.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = {
+ .runtime_suspend = sh_mobile_ceu_runtime_nop,
+ .runtime_resume = sh_mobile_ceu_runtime_nop,
+};
+
+static struct platform_driver sh_mobile_ceu_driver = {
+ .driver = {
+ .name = "sh_mobile_ceu",
+ .pm = &sh_mobile_ceu_dev_pm_ops,
+ },
+ .probe = sh_mobile_ceu_probe,
+ .remove = __devexit_p(sh_mobile_ceu_remove),
+};
+
+static int __init sh_mobile_ceu_init(void)
+{
+ /* Ignore the return code - the CSI-2 helper module is optional */
+ request_module("sh_mobile_csi2");
+ return platform_driver_register(&sh_mobile_ceu_driver);
+}
+
+static void __exit sh_mobile_ceu_exit(void)
+{
+ platform_driver_unregister(&sh_mobile_ceu_driver);
+}
+
+module_init(sh_mobile_ceu_init);
+module_exit(sh_mobile_ceu_exit);
+
+MODULE_DESCRIPTION("SuperH Mobile CEU driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.6");
+MODULE_ALIAS("platform:sh_mobile_ceu");
diff --git a/drivers/media/platform/soc_camera/sh_mobile_csi2.c b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
new file mode 100644
index 0000000..0528650
--- /dev/null
+++ b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
@@ -0,0 +1,398 @@
+/*
+ * Driver for the SH-Mobile MIPI CSI-2 unit
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/module.h>
+
+#include <media/sh_mobile_ceu.h>
+#include <media/sh_mobile_csi2.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+#define SH_CSI2_TREF 0x00
+#define SH_CSI2_SRST 0x04
+#define SH_CSI2_PHYCNT 0x08
+#define SH_CSI2_CHKSUM 0x0C
+#define SH_CSI2_VCDT 0x10
+
+struct sh_csi2 {
+ struct v4l2_subdev subdev;
+ struct list_head list;
+ unsigned int irq;
+ unsigned long mipi_flags;
+ void __iomem *base;
+ struct platform_device *pdev;
+ struct sh_csi2_client_config *client;
+};
+
+static int sh_csi2_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
+ struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
+
+ if (mf->width > 8188)
+ mf->width = 8188;
+ else if (mf->width & 1)
+ mf->width &= ~1;
+
+ switch (pdata->type) {
+ case SH_CSI2C:
+ switch (mf->code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8: /* YUV422 */
+ case V4L2_MBUS_FMT_YUYV8_1_5X8: /* YUV420 */
+ case V4L2_MBUS_FMT_Y8_1X8: /* RAW8 */
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case V4L2_MBUS_FMT_SGRBG8_1X8:
+ break;
+ default:
+ /* All MIPI CSI-2 devices must support one of the primary formats */
+ mf->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ }
+ break;
+ case SH_CSI2I:
+ switch (mf->code) {
+ case V4L2_MBUS_FMT_Y8_1X8: /* RAW8 */
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case V4L2_MBUS_FMT_SGRBG8_1X8:
+ case V4L2_MBUS_FMT_SBGGR10_1X10: /* RAW10 */
+ case V4L2_MBUS_FMT_SBGGR12_1X12: /* RAW12 */
+ break;
+ default:
+ /* All MIPI CSI-2 devices must support one of the primary formats */
+ mf->code = V4L2_MBUS_FMT_SBGGR8_1X8;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * We have done our best in try_fmt to tell the sensor which formats we
+ * support. If the configuration is now unsuitable for us, we can only
+ * error out.
+ */
+static int sh_csi2_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
+ u32 tmp = (priv->client->channel & 3) << 8;
+
+ dev_dbg(sd->v4l2_dev->dev, "%s(%u)\n", __func__, mf->code);
+ if (mf->width > 8188 || mf->width & 1)
+ return -EINVAL;
+
+ switch (mf->code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ tmp |= 0x1e; /* YUV422 8 bit */
+ break;
+ case V4L2_MBUS_FMT_YUYV8_1_5X8:
+ tmp |= 0x18; /* YUV420 8 bit */
+ break;
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
+ tmp |= 0x21; /* RGB555 */
+ break;
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ tmp |= 0x22; /* RGB565 */
+ break;
+ case V4L2_MBUS_FMT_Y8_1X8:
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case V4L2_MBUS_FMT_SGRBG8_1X8:
+ tmp |= 0x2a; /* RAW8 */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ iowrite32(tmp, priv->base + SH_CSI2_VCDT);
+
+ return 0;
+}
+
+static int sh_csi2_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH;
+ cfg->type = V4L2_MBUS_PARALLEL;
+
+ return 0;
+}
+
+static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
+ const struct v4l2_mbus_config *cfg)
+{
+ struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
+ struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
+ struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,
+ .flags = priv->mipi_flags};
+
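+ /* Pass the MIPI bus flags negotiated at connect time down to the sensor */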
+ return v4l2_subdev_call(client_sd, video, s_mbus_config, &client_cfg);
+}
+
+static struct v4l2_subdev_video_ops sh_csi2_subdev_video_ops = {
+ .s_mbus_fmt = sh_csi2_s_fmt,
+ .try_mbus_fmt = sh_csi2_try_fmt,
+ .g_mbus_config = sh_csi2_g_mbus_config,
+ .s_mbus_config = sh_csi2_s_mbus_config,
+};
+
+static void sh_csi2_hwinit(struct sh_csi2 *priv)
+{
+ struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
+ __u32 tmp = 0x10; /* Enable MIPI CSI clock lane */
+
+ /* Reflect registers immediately */
+ iowrite32(0x00000001, priv->base + SH_CSI2_TREF);
+ /* Reset the CSI-2 hardware */
+ iowrite32(0x00000001, priv->base + SH_CSI2_SRST);
+ udelay(5);
+ iowrite32(0x00000000, priv->base + SH_CSI2_SRST);
+
+ switch (pdata->type) {
+ case SH_CSI2C:
+ if (priv->client->lanes == 1)
+ tmp |= 1;
+ else
+ /* Default - both lanes */
+ tmp |= 3;
+ break;
+ case SH_CSI2I:
+ if (!priv->client->lanes || priv->client->lanes > 4)
+ /* Default - all 4 lanes */
+ tmp |= 0xf;
+ else
+ tmp |= (1 << priv->client->lanes) - 1;
+ }
+
+ if (priv->client->phy == SH_CSI2_PHY_MAIN)
+ tmp |= 0x8000;
+
+ iowrite32(tmp, priv->base + SH_CSI2_PHYCNT);
+
+ tmp = 0;
+ if (pdata->flags & SH_CSI2_ECC)
+ tmp |= 2;
+ if (pdata->flags & SH_CSI2_CRC)
+ tmp |= 1;
+ iowrite32(tmp, priv->base + SH_CSI2_CHKSUM);
+}
+
+static int sh_csi2_client_connect(struct sh_csi2 *priv)
+{
+ struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev);
+ struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
+ struct device *dev = v4l2_get_subdevdata(&priv->subdev);
+ struct v4l2_mbus_config cfg;
+ unsigned long common_flags, csi2_flags;
+ int i, ret;
+
+ if (priv->client)
+ return -EBUSY;
+
+ for (i = 0; i < pdata->num_clients; i++)
+ if (&pdata->clients[i].pdev->dev == icd->pdev)
+ break;
+
+ dev_dbg(dev, "%s(%p): found #%d\n", __func__, dev, i);
+
+ if (i == pdata->num_clients)
+ return -ENODEV;
+
+ /* Check if we can support this camera */
+ csi2_flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK | V4L2_MBUS_CSI2_1_LANE;
+
+ switch (pdata->type) {
+ case SH_CSI2C:
+ if (pdata->clients[i].lanes != 1)
+ csi2_flags |= V4L2_MBUS_CSI2_2_LANE;
+ break;
+ case SH_CSI2I:
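+ /*
+ * Deliberate fall-through below: support for more lanes implies
+ * support for fewer.
+ */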
+ switch (pdata->clients[i].lanes) {
+ default:
+ csi2_flags |= V4L2_MBUS_CSI2_4_LANE;
+ case 3:
+ csi2_flags |= V4L2_MBUS_CSI2_3_LANE;
+ case 2:
+ csi2_flags |= V4L2_MBUS_CSI2_2_LANE;
+ }
+ }
+
+ cfg.type = V4L2_MBUS_CSI2;
+ ret = v4l2_subdev_call(client_sd, video, g_mbus_config, &cfg);
+ if (ret == -ENOIOCTLCMD)
+ common_flags = csi2_flags;
+ else if (!ret)
+ common_flags = soc_mbus_config_compatible(&cfg,
+ csi2_flags);
+ else
+ common_flags = 0;
+
+ if (!common_flags)
+ return -EINVAL;
+
+ /* All good: camera MIPI configuration supported */
+ priv->mipi_flags = common_flags;
+ priv->client = pdata->clients + i;
+
+ pm_runtime_get_sync(dev);
+
+ sh_csi2_hwinit(priv);
+
+ return 0;
+}
+
+static void sh_csi2_client_disconnect(struct sh_csi2 *priv)
+{
+ if (!priv->client)
+ return;
+
+ priv->client = NULL;
+
+ pm_runtime_put(v4l2_get_subdevdata(&priv->subdev));
+}
+
+static int sh_csi2_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
+
+ if (on)
+ return sh_csi2_client_connect(priv);
+
+ sh_csi2_client_disconnect(priv);
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops = {
+ .s_power = sh_csi2_s_power,
+};
+
+static struct v4l2_subdev_ops sh_csi2_subdev_ops = {
+ .core = &sh_csi2_subdev_core_ops,
+ .video = &sh_csi2_subdev_video_ops,
+};
+
+static __devinit int sh_csi2_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ unsigned int irq;
+ int ret;
+ struct sh_csi2 *priv;
+ /* Platform data specifies the PHY, lanes, ECC, CRC */
+ struct sh_csi2_pdata *pdata = pdev->dev.platform_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* Interrupt unused so far */
+ irq = platform_get_irq(pdev, 0);
+
+ if (!res || (int)irq <= 0 || !pdata) {
+ dev_err(&pdev->dev, "Not enough CSI2 platform resources.\n");
+ return -ENODEV;
+ }
+
+ /* TODO: Add support for CSI2I. Careful: different register layout! */
+ if (pdata->type != SH_CSI2C) {
+ dev_err(&pdev->dev, "Only CSI2C supported ATM.\n");
+ return -EINVAL;
+ }
+
+ priv = kzalloc(sizeof(struct sh_csi2), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->irq = irq;
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "CSI2 register region already claimed\n");
+ ret = -EBUSY;
+ goto ereqreg;
+ }
+
+ priv->base = ioremap(res->start, resource_size(res));
+ if (!priv->base) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "Unable to ioremap CSI2 registers.\n");
+ goto eremap;
+ }
+
+ priv->pdev = pdev;
+ platform_set_drvdata(pdev, priv);
+
+ v4l2_subdev_init(&priv->subdev, &sh_csi2_subdev_ops);
+ v4l2_set_subdevdata(&priv->subdev, &pdev->dev);
+
+ snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.mipi-csi",
+ dev_name(pdata->v4l2_dev->dev));
+ ret = v4l2_device_register_subdev(pdata->v4l2_dev, &priv->subdev);
+ dev_dbg(&pdev->dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret);
+ if (ret < 0)
+ goto esdreg;
+
+ pm_runtime_enable(&pdev->dev);
+
+ dev_dbg(&pdev->dev, "CSI2 probed.\n");
+
+ return 0;
+
+esdreg:
+ iounmap(priv->base);
+eremap:
+ release_mem_region(res->start, resource_size(res));
+ereqreg:
+ kfree(priv);
+
+ return ret;
+}
+
+static __devexit int sh_csi2_remove(struct platform_device *pdev)
+{
+ struct sh_csi2 *priv = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ v4l2_device_unregister_subdev(&priv->subdev);
+ pm_runtime_disable(&pdev->dev);
+ iounmap(priv->base);
+ release_mem_region(res->start, resource_size(res));
+ platform_set_drvdata(pdev, NULL);
+ kfree(priv);
+
+ return 0;
+}
+
+static struct platform_driver __refdata sh_csi2_pdrv = {
+ .remove = __devexit_p(sh_csi2_remove),
+ .probe = sh_csi2_probe,
+ .driver = {
+ .name = "sh-mobile-csi2",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(sh_csi2_pdrv);
+
+MODULE_DESCRIPTION("SH-Mobile MIPI CSI-2 driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sh-mobile-csi2");
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
new file mode 100644
index 0000000..3be9294
--- /dev/null
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -0,0 +1,1605 @@
+/*
+ * camera image capture (abstract) bus driver
+ *
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * This driver provides an interface between platform-specific camera
+ * busses and camera devices. It should be used if the camera is
+ * not connected over a "proper" bus like PCI or USB, but over a
+ * special bus, like, for example, the Quick Capture interface on PXA270
+ * SoCs. Later it should also be used for i.MX31 SoCs from Freescale.
+ * It can handle multiple cameras and / or multiple busses, which can
+ * be used, e.g., in stereo-vision applications.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/vmalloc.h>
+
+#include <media/soc_camera.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf-core.h>
+#include <media/videobuf2-core.h>
+#include <media/soc_mediabus.h>
+
+/* Default to VGA resolution */
+#define DEFAULT_WIDTH 640
+#define DEFAULT_HEIGHT 480
+
+#define is_streaming(ici, icd) \
+ (((ici)->ops->init_videobuf) ? \
+ (icd)->vb_vidq.streaming : \
+ vb2_is_streaming(&(icd)->vb2_vidq))
+
+static LIST_HEAD(hosts);
+static LIST_HEAD(devices);
+static DEFINE_MUTEX(list_lock); /* Protects the list of hosts */
+
+int soc_camera_power_on(struct device *dev, struct soc_camera_link *icl)
+{
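+ /*
+ * Enable the board regulators first; if the optional platform power hook
+ * then fails, the regulators are disabled again below.
+ */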
+ int ret = regulator_bulk_enable(icl->num_regulators,
+ icl->regulators);
+ if (ret < 0) {
+ dev_err(dev, "Cannot enable regulators\n");
+ return ret;
+ }
+
+ if (icl->power) {
+ ret = icl->power(dev, 1);
+ if (ret < 0) {
+ dev_err(dev,
+ "Platform failed to power-on the camera.\n");
+ regulator_bulk_disable(icl->num_regulators,
+ icl->regulators);
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(soc_camera_power_on);
+
+int soc_camera_power_off(struct device *dev, struct soc_camera_link *icl)
+{
+ int ret = 0;
+ int err;
+
+ if (icl->power) {
+ err = icl->power(dev, 0);
+ if (err < 0) {
+ dev_err(dev,
+ "Platform failed to power-off the camera.\n");
+ ret = err;
+ }
+ }
+
+ err = regulator_bulk_disable(icl->num_regulators,
+ icl->regulators);
+ if (err < 0) {
+ dev_err(dev, "Cannot disable regulators\n");
+ ret = ret ? : err;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(soc_camera_power_off);
+
+static int __soc_camera_power_on(struct soc_camera_device *icd)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ int ret;
+
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
+static int __soc_camera_power_off(struct soc_camera_device *icd)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ int ret;
+
+ ret = v4l2_subdev_call(sd, core, s_power, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
+const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
+ struct soc_camera_device *icd, unsigned int fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < icd->num_user_formats; i++)
+ if (icd->user_formats[i].host_fmt->fourcc == fourcc)
+ return icd->user_formats + i;
+ return NULL;
+}
+EXPORT_SYMBOL(soc_camera_xlate_by_fourcc);
+
+/**
+ * soc_camera_apply_board_flags() - apply platform SOCAM_SENSOR_INVERT_* flags
+ * @icl: camera platform parameters
+ * @cfg: media bus configuration
+ * @return: resulting flags
+ */
+unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl,
+ const struct v4l2_mbus_config *cfg)
+{
+ unsigned long f, flags = cfg->flags;
+
+ /* If only one of the two polarities is supported, switch to the opposite */
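+ /* Toggling both bits with XOR turns ACTIVE_HIGH into ACTIVE_LOW and vice versa */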
+ if (icl->flags & SOCAM_SENSOR_INVERT_HSYNC) {
+ f = flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ if (f == V4L2_MBUS_HSYNC_ACTIVE_HIGH || f == V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ flags ^= V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW;
+ }
+
+ if (icl->flags & SOCAM_SENSOR_INVERT_VSYNC) {
+ f = flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ if (f == V4L2_MBUS_VSYNC_ACTIVE_HIGH || f == V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ flags ^= V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW;
+ }
+
+ if (icl->flags & SOCAM_SENSOR_INVERT_PCLK) {
+ f = flags & (V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING);
+ if (f == V4L2_MBUS_PCLK_SAMPLE_RISING || f == V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ flags ^= V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ }
+
+ return flags;
+}
+EXPORT_SYMBOL(soc_camera_apply_board_flags);
+
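+/* Split a fourcc into its four characters for printing with "%c%c%c%c" */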
+#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
+ ((x) >> 24) & 0xff
+
+static int soc_camera_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ int ret;
+
+ dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
+ pixfmtstr(pix->pixelformat), pix->width, pix->height);
+
+ if (pix->pixelformat != V4L2_PIX_FMT_JPEG &&
+ !(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) {
+ pix->bytesperline = 0;
+ pix->sizeimage = 0;
+ }
+
+ ret = ici->ops->try_fmt(icd, f);
+ if (ret < 0)
+ return ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate)
+ return -EINVAL;
+
+ ret = soc_mbus_bytes_per_line(pix->width, xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+
+ pix->bytesperline = max_t(u32, pix->bytesperline, ret);
+
+ ret = soc_mbus_image_size(xlate->host_fmt, pix->bytesperline,
+ pix->height);
+ if (ret < 0)
+ return ret;
+
+ pix->sizeimage = max_t(u32, pix->sizeimage, ret);
+
+ return 0;
+}
+
+static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ WARN_ON(priv != file->private_data);
+
+ /* Only single-plane capture is supported so far */
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ /* limit format to hardware capabilities */
+ return soc_camera_try_fmt(icd, f);
+}
+
+static int soc_camera_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ if (inp->index != 0)
+ return -EINVAL;
+
+ /* default is camera */
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = V4L2_STD_UNKNOWN;
+ strcpy(inp->name, "Camera");
+
+ return 0;
+}
+
+static int soc_camera_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int soc_camera_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int soc_camera_s_std(struct file *file, void *priv, v4l2_std_id *a)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+ return v4l2_subdev_call(sd, core, s_std, *a);
+}
+
+static int soc_camera_g_std(struct file *file, void *priv, v4l2_std_id *a)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+ return v4l2_subdev_call(sd, core, g_std, a);
+}
+
+static int soc_camera_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ return ici->ops->enum_framesizes(icd, fsize);
+}
+
+static int soc_camera_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ int ret;
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ WARN_ON(priv != file->private_data);
+
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+
+ if (ici->ops->init_videobuf) {
+ ret = videobuf_reqbufs(&icd->vb_vidq, p);
+ if (ret < 0)
+ return ret;
+
+ ret = ici->ops->reqbufs(icd, p);
+ } else {
+ ret = vb2_reqbufs(&icd->vb2_vidq, p);
+ }
+
+ if (!ret && !icd->streamer)
+ icd->streamer = file;
+
+ return ret;
+}
+
+static int soc_camera_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ WARN_ON(priv != file->private_data);
+
+ if (ici->ops->init_videobuf)
+ return videobuf_querybuf(&icd->vb_vidq, p);
+ else
+ return vb2_querybuf(&icd->vb2_vidq, p);
+}
+
+static int soc_camera_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ WARN_ON(priv != file->private_data);
+
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ if (ici->ops->init_videobuf)
+ return videobuf_qbuf(&icd->vb_vidq, p);
+ else
+ return vb2_qbuf(&icd->vb2_vidq, p);
+}
+
+static int soc_camera_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ WARN_ON(priv != file->private_data);
+
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ if (ici->ops->init_videobuf)
+ return videobuf_dqbuf(&icd->vb_vidq, p, file->f_flags & O_NONBLOCK);
+ else
+ return vb2_dqbuf(&icd->vb2_vidq, p, file->f_flags & O_NONBLOCK);
+}
+
+static int soc_camera_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ /* videobuf2 only */
+ if (ici->ops->init_videobuf)
+ return -EINVAL;
+ else
+ return vb2_create_bufs(&icd->vb2_vidq, create);
+}
+
+static int soc_camera_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *b)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ /* videobuf2 only */
+ if (ici->ops->init_videobuf)
+ return -EINVAL;
+ else
+ return vb2_prepare_buf(&icd->vb2_vidq, b);
+}
+
+/* Always entered with .video_lock held */
+static int soc_camera_init_user_formats(struct soc_camera_device *icd)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ unsigned int i, fmts = 0, raw_fmts = 0;
+ int ret;
+ enum v4l2_mbus_pixelcode code;
+
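+ /* Count the media bus formats the attached client (sensor) can produce */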
+ while (!v4l2_subdev_call(sd, video, enum_mbus_fmt, raw_fmts, &code))
+ raw_fmts++;
+
+ if (!ici->ops->get_formats)
+ /*
+ * Fallback mode - the host will have to serve all
+ * sensor-provided formats one-to-one to the user
+ */
+ fmts = raw_fmts;
+ else
+ /*
+ * First pass - only count formats this host-sensor
+ * configuration can provide
+ */
+ for (i = 0; i < raw_fmts; i++) {
+ ret = ici->ops->get_formats(icd, i, NULL);
+ if (ret < 0)
+ return ret;
+ fmts += ret;
+ }
+
+ if (!fmts)
+ return -ENXIO;
+
+ icd->user_formats =
+ vmalloc(fmts * sizeof(struct soc_camera_format_xlate));
+ if (!icd->user_formats)
+ return -ENOMEM;
+
+ dev_dbg(icd->pdev, "Found %d supported formats.\n", fmts);
+
+ /* Second pass - actually fill data formats */
+ fmts = 0;
+ for (i = 0; i < raw_fmts; i++)
+ if (!ici->ops->get_formats) {
+ v4l2_subdev_call(sd, video, enum_mbus_fmt, i, &code);
+ icd->user_formats[fmts].host_fmt =
+ soc_mbus_get_fmtdesc(code);
+ if (icd->user_formats[fmts].host_fmt)
+ icd->user_formats[fmts++].code = code;
+ } else {
+ ret = ici->ops->get_formats(icd, i,
+ &icd->user_formats[fmts]);
+ if (ret < 0)
+ goto egfmt;
+ fmts += ret;
+ }
+
+ icd->num_user_formats = fmts;
+ icd->current_fmt = &icd->user_formats[0];
+
+ return 0;
+
+egfmt:
+ vfree(icd->user_formats);
+ return ret;
+}
+
+/* Always entered with .video_lock held */
+static void soc_camera_free_user_formats(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ if (ici->ops->put_formats)
+ ici->ops->put_formats(icd);
+ icd->current_fmt = NULL;
+ icd->num_user_formats = 0;
+ vfree(icd->user_formats);
+ icd->user_formats = NULL;
+}
+
+/* Called with .vb_lock held, or from the first open(2), see comment there */
+static int soc_camera_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ int ret;
+
+ dev_dbg(icd->pdev, "S_FMT(%c%c%c%c, %ux%u)\n",
+ pixfmtstr(pix->pixelformat), pix->width, pix->height);
+
+ /* We always call try_fmt() before set_fmt() or set_crop() */
+ ret = soc_camera_try_fmt(icd, f);
+ if (ret < 0)
+ return ret;
+
+ ret = ici->ops->set_fmt(icd, f);
+ if (ret < 0) {
+ return ret;
+ } else if (!icd->current_fmt ||
+ icd->current_fmt->host_fmt->fourcc != pix->pixelformat) {
+ dev_err(icd->pdev,
+ "Host driver hasn't set up current format correctly!\n");
+ return -EINVAL;
+ }
+
+ icd->user_width = pix->width;
+ icd->user_height = pix->height;
+ icd->bytesperline = pix->bytesperline;
+ icd->sizeimage = pix->sizeimage;
+ icd->colorspace = pix->colorspace;
+ icd->field = pix->field;
+ if (ici->ops->init_videobuf)
+ icd->vb_vidq.field = pix->field;
+
+ dev_dbg(icd->pdev, "set width: %d height: %d\n",
+ icd->user_width, icd->user_height);
+
+ /* set physical bus parameters */
+ return ici->ops->set_bus_param(icd);
+}
+
+static int soc_camera_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct soc_camera_device *icd = dev_get_drvdata(vdev->parent);
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct soc_camera_host *ici;
+ int ret;
+
+ if (!to_soc_camera_control(icd))
+ /* No device driver attached */
+ return -ENODEV;
+
+ ici = to_soc_camera_host(icd->parent);
+
+ if (mutex_lock_interruptible(&icd->video_lock))
+ return -ERESTARTSYS;
+ if (!try_module_get(ici->ops->owner)) {
+ dev_err(icd->pdev, "Couldn't lock capture bus driver.\n");
+ ret = -EINVAL;
+ goto emodule;
+ }
+
+ icd->use_count++;
+
+ /* Now we really have to activate the camera */
+ if (icd->use_count == 1) {
+ /* Restore parameters from before the last close(), per the V4L2 API */
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = icd->user_width,
+ .height = icd->user_height,
+ .field = icd->field,
+ .colorspace = icd->colorspace,
+ .pixelformat =
+ icd->current_fmt->host_fmt->fourcc,
+ },
+ };
+
+ /* The camera could have been already on, try to reset */
+ if (icl->reset)
+ icl->reset(icd->pdev);
+
+ /* Don't mess with the host during probe */
+ mutex_lock(&ici->host_lock);
+ ret = ici->ops->add(icd);
+ mutex_unlock(&ici->host_lock);
+ if (ret < 0) {
+ dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret);
+ goto eiciadd;
+ }
+
+ ret = __soc_camera_power_on(icd);
+ if (ret < 0)
+ goto epower;
+
+ pm_runtime_enable(&icd->vdev->dev);
+ ret = pm_runtime_resume(&icd->vdev->dev);
+ if (ret < 0 && ret != -ENOSYS)
+ goto eresume;
+
+ /*
+ * Try to configure with default parameters. Notice: this is the
+ * very first open, so we cannot race against other calls,
+ * apart from someone else calling open() simultaneously, but
+ * .video_lock is protecting us against it.
+ */
+ ret = soc_camera_set_fmt(icd, &f);
+ if (ret < 0)
+ goto esfmt;
+
+ if (ici->ops->init_videobuf) {
+ ici->ops->init_videobuf(&icd->vb_vidq, icd);
+ } else {
+ ret = ici->ops->init_videobuf2(&icd->vb2_vidq, icd);
+ if (ret < 0)
+ goto einitvb;
+ }
+ v4l2_ctrl_handler_setup(&icd->ctrl_handler);
+ }
+ mutex_unlock(&icd->video_lock);
+
+ file->private_data = icd;
+ dev_dbg(icd->pdev, "camera device open\n");
+
+ return 0;
+
+ /*
+ * First four errors are entered with the .video_lock held
+ * and use_count == 1
+ */
+einitvb:
+esfmt:
+ pm_runtime_disable(&icd->vdev->dev);
+eresume:
+ __soc_camera_power_off(icd);
+epower:
+ ici->ops->remove(icd);
+eiciadd:
+ icd->use_count--;
+ module_put(ici->ops->owner);
+emodule:
+ mutex_unlock(&icd->video_lock);
+
+ return ret;
+}
+
+static int soc_camera_close(struct file *file)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ mutex_lock(&icd->video_lock);
+ icd->use_count--;
+ if (!icd->use_count) {
+ pm_runtime_suspend(&icd->vdev->dev);
+ pm_runtime_disable(&icd->vdev->dev);
+
+ if (ici->ops->init_videobuf2)
+ vb2_queue_release(&icd->vb2_vidq);
+ ici->ops->remove(icd);
+
+ __soc_camera_power_off(icd);
+ }
+
+ if (icd->streamer == file)
+ icd->streamer = NULL;
+ mutex_unlock(&icd->video_lock);
+
+ module_put(ici->ops->owner);
+
+ dev_dbg(icd->pdev, "camera device close\n");
+
+ return 0;
+}
+
+static ssize_t soc_camera_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct soc_camera_device *icd = file->private_data;
+ int err = -EINVAL;
+
+ dev_err(icd->pdev, "camera device read not implemented\n");
+
+ return err;
+}
+
+static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ int err;
+
+ dev_dbg(icd->pdev, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
+
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ if (mutex_lock_interruptible(&icd->video_lock))
+ return -ERESTARTSYS;
+ if (ici->ops->init_videobuf)
+ err = videobuf_mmap_mapper(&icd->vb_vidq, vma);
+ else
+ err = vb2_mmap(&icd->vb2_vidq, vma);
+ mutex_unlock(&icd->video_lock);
+
+ dev_dbg(icd->pdev, "vma start=0x%08lx, size=%ld, ret=%d\n",
+ (unsigned long)vma->vm_start,
+ (unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
+ err);
+
+ return err;
+}
+
+static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ unsigned res = POLLERR;
+
+ if (icd->streamer != file)
+ return POLLERR;
+
+ mutex_lock(&icd->video_lock);
+ if (ici->ops->init_videobuf && list_empty(&icd->vb_vidq.stream))
+ dev_err(icd->pdev, "Trying to poll with no queued buffers!\n");
+ else
+ res = ici->ops->poll(file, pt);
+ mutex_unlock(&icd->video_lock);
+ return res;
+}
+
+void soc_camera_lock(struct vb2_queue *vq)
+{
+ struct soc_camera_device *icd = vb2_get_drv_priv(vq);
+ mutex_lock(&icd->video_lock);
+}
+EXPORT_SYMBOL(soc_camera_lock);
+
+void soc_camera_unlock(struct vb2_queue *vq)
+{
+ struct soc_camera_device *icd = vb2_get_drv_priv(vq);
+ mutex_unlock(&icd->video_lock);
+}
+EXPORT_SYMBOL(soc_camera_unlock);
+
+static struct v4l2_file_operations soc_camera_fops = {
+ .owner = THIS_MODULE,
+ .open = soc_camera_open,
+ .release = soc_camera_close,
+ .unlocked_ioctl = video_ioctl2,
+ .read = soc_camera_read,
+ .mmap = soc_camera_mmap,
+ .poll = soc_camera_poll,
+};
+
+static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct soc_camera_device *icd = file->private_data;
+ int ret;
+
+ WARN_ON(priv != file->private_data);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_warn(icd->pdev, "Wrong buf-type %d\n", f->type);
+ return -EINVAL;
+ }
+
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+
+ if (is_streaming(to_soc_camera_host(icd->parent), icd)) {
+ dev_err(icd->pdev, "S_FMT denied: queue initialised\n");
+ return -EBUSY;
+ }
+
+ ret = soc_camera_set_fmt(icd, f);
+
+ if (!ret && !icd->streamer)
+ icd->streamer = file;
+
+ return ret;
+}
+
+static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct soc_camera_device *icd = file->private_data;
+ const struct soc_mbus_pixelfmt *format;
+
+ WARN_ON(priv != file->private_data);
+
+ if (f->index >= icd->num_user_formats)
+ return -EINVAL;
+
+ format = icd->user_formats[f->index].host_fmt;
+
+ if (format->name)
+ strlcpy(f->description, format->name, sizeof(f->description));
+ f->pixelformat = format->fourcc;
+ return 0;
+}
+
+static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+
+ WARN_ON(priv != file->private_data);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ pix->width = icd->user_width;
+ pix->height = icd->user_height;
+ pix->bytesperline = icd->bytesperline;
+ pix->sizeimage = icd->sizeimage;
+ pix->field = icd->field;
+ pix->pixelformat = icd->current_fmt->host_fmt->fourcc;
+ pix->colorspace = icd->colorspace;
+ dev_dbg(icd->pdev, "current_fmt->fourcc: 0x%08x\n",
+ icd->current_fmt->host_fmt->fourcc);
+ return 0;
+}
+
+static int soc_camera_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ WARN_ON(priv != file->private_data);
+
+ strlcpy(cap->driver, ici->drv_name, sizeof(cap->driver));
+ return ici->ops->querycap(ici, cap);
+}
+
+static int soc_camera_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type i)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ int ret;
+
+ WARN_ON(priv != file->private_data);
+
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ /* This calls buf_queue from host driver's videobuf_queue_ops */
+ if (ici->ops->init_videobuf)
+ ret = videobuf_streamon(&icd->vb_vidq);
+ else
+ ret = vb2_streamon(&icd->vb2_vidq, i);
+
+ if (!ret)
+ v4l2_subdev_call(sd, video, s_stream, 1);
+
+ return ret;
+}
+
+static int soc_camera_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type i)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ WARN_ON(priv != file->private_data);
+
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ /*
+ * This calls buf_release from host driver's videobuf_queue_ops for all
+ * remaining buffers. When the last buffer is freed, stop capture
+ */
+ if (ici->ops->init_videobuf)
+ videobuf_streamoff(&icd->vb_vidq);
+ else
+ vb2_streamoff(&icd->vb2_vidq, i);
+
+ v4l2_subdev_call(sd, video, s_stream, 0);
+
+ return 0;
+}
+
+static int soc_camera_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *a)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ return ici->ops->cropcap(icd, a);
+}
+
+static int soc_camera_g_crop(struct file *file, void *fh,
+ struct v4l2_crop *a)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ int ret;
+
+ ret = ici->ops->get_crop(icd, a);
+
+ return ret;
+}
+
+/*
+ * According to the V4L2 API, drivers shall not update the struct v4l2_crop
+ * argument with the actual geometry; instead, the user shall use G_CROP to
+ * retrieve it.
+ */
+static int soc_camera_s_crop(struct file *file, void *fh,
+ const struct v4l2_crop *a)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ const struct v4l2_rect *rect = &a->c;
+ struct v4l2_crop current_crop;
+ int ret;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ dev_dbg(icd->pdev, "S_CROP(%ux%u@%u:%u)\n",
+ rect->width, rect->height, rect->left, rect->top);
+
+ /* The current crop is needed to tell a pure move from a size change */
+ ret = ici->ops->get_crop(icd, &current_crop);
+
+ /* Prohibit window size change with initialised buffers */
+ if (ret < 0) {
+ dev_err(icd->pdev,
+ "S_CROP denied: getting current crop failed\n");
+ } else if ((a->c.width == current_crop.c.width &&
+ a->c.height == current_crop.c.height) ||
+ !is_streaming(ici, icd)) {
+ /* same size or not streaming - use .set_crop() */
+ ret = ici->ops->set_crop(icd, a);
+ } else if (ici->ops->set_livecrop) {
+ ret = ici->ops->set_livecrop(icd, a);
+ } else {
+ dev_err(icd->pdev,
+ "S_CROP denied: queue initialised and sizes differ\n");
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
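+
+/*
+ * Illustrative user-space use of the cropping handlers above; the window
+ * geometry is arbitrary and fd is assumed to be an open capture node:
+ *
+ *   struct v4l2_crop crop = {
+ *       .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ *       .c = { .left = 0, .top = 0, .width = 320, .height = 240 },
+ *   };
+ *   if (!ioctl(fd, VIDIOC_S_CROP, &crop))
+ *       ioctl(fd, VIDIOC_G_CROP, &crop);
+ *
+ * G_CROP is needed because, as noted above, S_CROP does not return the
+ * geometry the driver actually applied.
+ */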
+
+static int soc_camera_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ /* With a wrong type no need to try to fall back to cropping */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (!ici->ops->get_selection)
+ return -ENOTTY;
+
+ return ici->ops->get_selection(icd, s);
+}
+
+static int soc_camera_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ int ret;
+
+ /* In all these cases cropping emulation will not help */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ (s->target != V4L2_SEL_TGT_COMPOSE_ACTIVE &&
+ s->target != V4L2_SEL_TGT_CROP_ACTIVE))
+ return -EINVAL;
+
+ if (s->target == V4L2_SEL_TGT_COMPOSE_ACTIVE) {
+ /* No output size change during a running capture! */
+ if (is_streaming(ici, icd) &&
+ (icd->user_width != s->r.width ||
+ icd->user_height != s->r.height))
+ return -EBUSY;
+
+ /*
+ * Only one user is allowed to change the output format, touch
+ * buffers, start / stop streaming, poll for data
+ */
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+ }
+
+ if (!ici->ops->set_selection)
+ return -ENOTTY;
+
+ ret = ici->ops->set_selection(icd, s);
+ if (!ret &&
+ s->target == V4L2_SEL_TGT_COMPOSE_ACTIVE) {
+ icd->user_width = s->r.width;
+ icd->user_height = s->r.height;
+ if (!icd->streamer)
+ icd->streamer = file;
+ }
+
+ return ret;
+}
+
+static int soc_camera_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ if (ici->ops->get_parm)
+ return ici->ops->get_parm(icd, a);
+
+ return -ENOIOCTLCMD;
+}
+
+static int soc_camera_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ if (ici->ops->set_parm)
+ return ici->ops->set_parm(icd, a);
+
+ return -ENOIOCTLCMD;
+}
+
+static int soc_camera_g_chip_ident(struct file *file, void *fh,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+ return v4l2_subdev_call(sd, core, g_chip_ident, id);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int soc_camera_g_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+ return v4l2_subdev_call(sd, core, g_register, reg);
+}
+
+static int soc_camera_s_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+
+ return v4l2_subdev_call(sd, core, s_register, reg);
+}
+#endif
+
+static int soc_camera_probe(struct soc_camera_device *icd);
+
+/* So far this function cannot fail */
+static void scan_add_host(struct soc_camera_host *ici)
+{
+ struct soc_camera_device *icd;
+
+ mutex_lock(&ici->host_lock);
+
+ list_for_each_entry(icd, &devices, list) {
+ if (icd->iface == ici->nr) {
+ icd->parent = ici->v4l2_dev.dev;
+ soc_camera_probe(icd);
+ }
+ }
+
+ mutex_unlock(&ici->host_lock);
+}
+
+#ifdef CONFIG_I2C_BOARDINFO
+static int soc_camera_init_i2c(struct soc_camera_device *icd,
+ struct soc_camera_link *icl)
+{
+ struct i2c_client *client;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct i2c_adapter *adap = i2c_get_adapter(icl->i2c_adapter_id);
+ struct v4l2_subdev *subdev;
+
+ if (!adap) {
+ dev_err(icd->pdev, "Cannot get I2C adapter #%d. No driver?\n",
+ icl->i2c_adapter_id);
+ goto ei2cga;
+ }
+
+ icl->board_info->platform_data = icl;
+
+ subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap,
+ icl->board_info, NULL);
+ if (!subdev)
+ goto ei2cnd;
+
+ client = v4l2_get_subdevdata(subdev);
+
+ /* Use to_i2c_client(dev) to recover the i2c client */
+ icd->control = &client->dev;
+
+ return 0;
+ei2cnd:
+ i2c_put_adapter(adap);
+ei2cga:
+ return -ENODEV;
+}
+
+static void soc_camera_free_i2c(struct soc_camera_device *icd)
+{
+ struct i2c_client *client =
+ to_i2c_client(to_soc_camera_control(icd));
+ struct i2c_adapter *adap = client->adapter;
+
+ icd->control = NULL;
+ v4l2_device_unregister_subdev(i2c_get_clientdata(client));
+ i2c_unregister_device(client);
+ i2c_put_adapter(adap);
+}
+#else
+#define soc_camera_init_i2c(icd, icl) (-ENODEV)
+#define soc_camera_free_i2c(icd) do {} while (0)
+#endif
+
+static int soc_camera_video_start(struct soc_camera_device *icd);
+static int video_dev_create(struct soc_camera_device *icd);
+/* Called during host-driver probe */
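+/*
+ * Rough sequence: take the regulators, reset the client, call the host's
+ * .add(), allocate the video device, attach the client subdevice (over I2C
+ * or through the platform add_device()/del_device() hooks), merge its
+ * controls, build the format translation table and register the device
+ * node. The host's .remove() is called again at the end, so the interface
+ * is not kept active while the device node is unused.
+ */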
+static int soc_camera_probe(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct device *control = NULL;
+ struct v4l2_subdev *sd;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ dev_info(icd->pdev, "Probing %s\n", dev_name(icd->pdev));
+
+ /*
+ * Currently the subdev with the largest number of controls (13) is
+ * ov6650. So let's pick 16 as a hint for the control handler. Note
+ * that this is a hint only: too large and you waste some memory, too
+ * small and there is a (very) small performance hit when looking up
+ * controls in the internal hash.
+ */
+ ret = v4l2_ctrl_handler_init(&icd->ctrl_handler, 16);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_bulk_get(icd->pdev, icl->num_regulators,
+ icl->regulators);
+ if (ret < 0)
+ goto ereg;
+
+ /* The camera could have been already on, try to reset */
+ if (icl->reset)
+ icl->reset(icd->pdev);
+
+ ret = ici->ops->add(icd);
+ if (ret < 0)
+ goto eadd;
+
+ /* Must have icd->vdev before registering the device */
+ ret = video_dev_create(icd);
+ if (ret < 0)
+ goto evdc;
+
+ /* Non-i2c cameras, e.g., soc_camera_platform, have no board_info */
+ if (icl->board_info) {
+ ret = soc_camera_init_i2c(icd, icl);
+ if (ret < 0)
+ goto eadddev;
+ } else if (!icl->add_device || !icl->del_device) {
+ ret = -EINVAL;
+ goto eadddev;
+ } else {
+ if (icl->module_name)
+ request_module("%s", icl->module_name);
+
+ ret = icl->add_device(icd);
+ if (ret < 0)
+ goto eadddev;
+
+ /*
+ * FIXME: this is racy, we have to use driver-binding notification
+ * when it becomes available
+ */
+ control = to_soc_camera_control(icd);
+ if (!control || !control->driver || !dev_get_drvdata(control) ||
+ !try_module_get(control->driver->owner)) {
+ icl->del_device(icd);
+ ret = -ENODEV;
+ goto enodrv;
+ }
+ }
+
+ sd = soc_camera_to_subdev(icd);
+ sd->grp_id = soc_camera_grp_id(icd);
+ v4l2_set_subdev_hostdata(sd, icd);
+
+ ret = v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler, NULL);
+ if (ret < 0)
+ goto ectrl;
+
+ /* At this point client .probe() should have run already */
+ ret = soc_camera_init_user_formats(icd);
+ if (ret < 0)
+ goto eiufmt;
+
+ icd->field = V4L2_FIELD_ANY;
+
+ /*
+ * ..._video_start() will create a device node; video_register_device()
+ * itself is protected against concurrent open() calls, but we also have
+ * to protect our data.
+ */
+ mutex_lock(&icd->video_lock);
+
+ ret = soc_camera_video_start(icd);
+ if (ret < 0)
+ goto evidstart;
+
+ /* Try to improve our guess of a reasonable window format */
+ if (!v4l2_subdev_call(sd, video, g_mbus_fmt, &mf)) {
+ icd->user_width = mf.width;
+ icd->user_height = mf.height;
+ icd->colorspace = mf.colorspace;
+ icd->field = mf.field;
+ }
+
+ ici->ops->remove(icd);
+
+ mutex_unlock(&icd->video_lock);
+
+ return 0;
+
+evidstart:
+ mutex_unlock(&icd->video_lock);
+ soc_camera_free_user_formats(icd);
+eiufmt:
+ectrl:
+ if (icl->board_info) {
+ soc_camera_free_i2c(icd);
+ } else {
+ icl->del_device(icd);
+ module_put(control->driver->owner);
+ }
+enodrv:
+eadddev:
+ video_device_release(icd->vdev);
+ icd->vdev = NULL;
+evdc:
+ ici->ops->remove(icd);
+eadd:
+ regulator_bulk_free(icl->num_regulators, icl->regulators);
+ereg:
+ v4l2_ctrl_handler_free(&icd->ctrl_handler);
+ return ret;
+}
+
+/*
+ * This is called on device_unregister, which only means we have to disconnect
+ * from the host, but not remove ourselves from the device list
+ */
+static int soc_camera_remove(struct soc_camera_device *icd)
+{
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct video_device *vdev = icd->vdev;
+
+ BUG_ON(!icd->parent);
+
+ v4l2_ctrl_handler_free(&icd->ctrl_handler);
+ if (vdev) {
+ video_unregister_device(vdev);
+ icd->vdev = NULL;
+ }
+
+ if (icl->board_info) {
+ soc_camera_free_i2c(icd);
+ } else {
+ struct device_driver *drv = to_soc_camera_control(icd)->driver;
+ if (drv) {
+ icl->del_device(icd);
+ module_put(drv->owner);
+ }
+ }
+ soc_camera_free_user_formats(icd);
+
+ regulator_bulk_free(icl->num_regulators, icl->regulators);
+
+ return 0;
+}
+
+static int default_cropcap(struct soc_camera_device *icd,
+ struct v4l2_cropcap *a)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ return v4l2_subdev_call(sd, video, cropcap, a);
+}
+
+static int default_g_crop(struct soc_camera_device *icd, struct v4l2_crop *a)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ return v4l2_subdev_call(sd, video, g_crop, a);
+}
+
+static int default_s_crop(struct soc_camera_device *icd, const struct v4l2_crop *a)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ return v4l2_subdev_call(sd, video, s_crop, a);
+}
+
+static int default_g_parm(struct soc_camera_device *icd,
+ struct v4l2_streamparm *parm)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ return v4l2_subdev_call(sd, video, g_parm, parm);
+}
+
+static int default_s_parm(struct soc_camera_device *icd,
+ struct v4l2_streamparm *parm)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ return v4l2_subdev_call(sd, video, s_parm, parm);
+}
+
+static int default_enum_framesizes(struct soc_camera_device *icd,
+ struct v4l2_frmsizeenum *fsize)
+{
+ int ret;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ __u32 pixfmt = fsize->pixel_format;
+ struct v4l2_frmsizeenum fsize_mbus = *fsize;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (!xlate)
+ return -EINVAL;
+ /* Pass the mbus code via pixel_format, the sensor only handles mbus codes */
+ fsize_mbus.pixel_format = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, enum_framesizes, &fsize_mbus);
+ if (ret < 0)
+ return ret;
+
+ *fsize = fsize_mbus;
+ fsize->pixel_format = pixfmt;
+
+ return 0;
+}
+
+int soc_camera_host_register(struct soc_camera_host *ici)
+{
+ struct soc_camera_host *ix;
+ int ret;
+
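+ /*
+ * A host driver must provide format negotiation (try_fmt/set_fmt),
+ * set_bus_param, querycap, a buffer implementation (videobuf with
+ * reqbufs, or videobuf2), add/remove and poll. Cropping, streaming
+ * parameters and frame-size enumeration fall back to the client
+ * subdevice via the default_* helpers above if the host leaves them
+ * unset.
+ */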
+ if (!ici || !ici->ops ||
+ !ici->ops->try_fmt ||
+ !ici->ops->set_fmt ||
+ !ici->ops->set_bus_param ||
+ !ici->ops->querycap ||
+ ((!ici->ops->init_videobuf ||
+ !ici->ops->reqbufs) &&
+ !ici->ops->init_videobuf2) ||
+ !ici->ops->add ||
+ !ici->ops->remove ||
+ !ici->ops->poll ||
+ !ici->v4l2_dev.dev)
+ return -EINVAL;
+
+ if (!ici->ops->set_crop)
+ ici->ops->set_crop = default_s_crop;
+ if (!ici->ops->get_crop)
+ ici->ops->get_crop = default_g_crop;
+ if (!ici->ops->cropcap)
+ ici->ops->cropcap = default_cropcap;
+ if (!ici->ops->set_parm)
+ ici->ops->set_parm = default_s_parm;
+ if (!ici->ops->get_parm)
+ ici->ops->get_parm = default_g_parm;
+ if (!ici->ops->enum_framesizes)
+ ici->ops->enum_framesizes = default_enum_framesizes;
+
+ mutex_lock(&list_lock);
+ list_for_each_entry(ix, &hosts, list) {
+ if (ix->nr == ici->nr) {
+ ret = -EBUSY;
+ goto edevreg;
+ }
+ }
+
+ ret = v4l2_device_register(ici->v4l2_dev.dev, &ici->v4l2_dev);
+ if (ret < 0)
+ goto edevreg;
+
+ list_add_tail(&ici->list, &hosts);
+ mutex_unlock(&list_lock);
+
+ mutex_init(&ici->host_lock);
+ scan_add_host(ici);
+
+ return 0;
+
+edevreg:
+ mutex_unlock(&list_lock);
+ return ret;
+}
+EXPORT_SYMBOL(soc_camera_host_register);
+
+/* Unregister all clients! */
+void soc_camera_host_unregister(struct soc_camera_host *ici)
+{
+ struct soc_camera_device *icd;
+
+ mutex_lock(&list_lock);
+
+ list_del(&ici->list);
+ list_for_each_entry(icd, &devices, list)
+ if (icd->iface == ici->nr && to_soc_camera_control(icd))
+ soc_camera_remove(icd);
+
+ mutex_unlock(&list_lock);
+
+ v4l2_device_unregister(&ici->v4l2_dev);
+}
+EXPORT_SYMBOL(soc_camera_host_unregister);
+
+/* Image capture device */
+static int soc_camera_device_register(struct soc_camera_device *icd)
+{
+ struct soc_camera_device *ix;
+ int num = -1, i;
+
+ for (i = 0; i < 256 && num < 0; i++) {
+ num = i;
+ /* Check if this index is available on this interface */
+ list_for_each_entry(ix, &devices, list) {
+ if (ix->iface == icd->iface && ix->devnum == i) {
+ num = -1;
+ break;
+ }
+ }
+ }
+
+ if (num < 0)
+ /*
+ * ok, we have 256 cameras on this host...
+ * man, stay reasonable...
+ */
+ return -ENOMEM;
+
+ icd->devnum = num;
+ icd->use_count = 0;
+ icd->host_priv = NULL;
+ mutex_init(&icd->video_lock);
+
+ list_add_tail(&icd->list, &devices);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
+ .vidioc_querycap = soc_camera_querycap,
+ .vidioc_try_fmt_vid_cap = soc_camera_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = soc_camera_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = soc_camera_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = soc_camera_enum_fmt_vid_cap,
+ .vidioc_enum_input = soc_camera_enum_input,
+ .vidioc_g_input = soc_camera_g_input,
+ .vidioc_s_input = soc_camera_s_input,
+ .vidioc_s_std = soc_camera_s_std,
+ .vidioc_g_std = soc_camera_g_std,
+ .vidioc_enum_framesizes = soc_camera_enum_framesizes,
+ .vidioc_reqbufs = soc_camera_reqbufs,
+ .vidioc_querybuf = soc_camera_querybuf,
+ .vidioc_qbuf = soc_camera_qbuf,
+ .vidioc_dqbuf = soc_camera_dqbuf,
+ .vidioc_create_bufs = soc_camera_create_bufs,
+ .vidioc_prepare_buf = soc_camera_prepare_buf,
+ .vidioc_streamon = soc_camera_streamon,
+ .vidioc_streamoff = soc_camera_streamoff,
+ .vidioc_cropcap = soc_camera_cropcap,
+ .vidioc_g_crop = soc_camera_g_crop,
+ .vidioc_s_crop = soc_camera_s_crop,
+ .vidioc_g_selection = soc_camera_g_selection,
+ .vidioc_s_selection = soc_camera_s_selection,
+ .vidioc_g_parm = soc_camera_g_parm,
+ .vidioc_s_parm = soc_camera_s_parm,
+ .vidioc_g_chip_ident = soc_camera_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = soc_camera_g_register,
+ .vidioc_s_register = soc_camera_s_register,
+#endif
+};
+
+static int video_dev_create(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct video_device *vdev = video_device_alloc();
+
+ if (!vdev)
+ return -ENOMEM;
+
+ strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name));
+
+ vdev->parent = icd->pdev;
+ vdev->current_norm = V4L2_STD_UNKNOWN;
+ vdev->fops = &soc_camera_fops;
+ vdev->ioctl_ops = &soc_camera_ioctl_ops;
+ vdev->release = video_device_release;
+ vdev->tvnorms = V4L2_STD_UNKNOWN;
+ vdev->ctrl_handler = &icd->ctrl_handler;
+ vdev->lock = &icd->video_lock;
+
+ icd->vdev = vdev;
+
+ return 0;
+}
+
+/*
+ * Called from soc_camera_probe() above with icd->video_lock held
+ */
+static int soc_camera_video_start(struct soc_camera_device *icd)
+{
+ const struct device_type *type = icd->vdev->dev.type;
+ int ret;
+
+ if (!icd->parent)
+ return -ENODEV;
+
+ ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(icd->pdev, "video_register_device failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Restore device type, possibly set by the subdevice driver */
+ icd->vdev->dev.type = type;
+
+ return 0;
+}
+
+static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
+{
+ struct soc_camera_link *icl = pdev->dev.platform_data;
+ struct soc_camera_device *icd;
+ int ret;
+
+ if (!icl)
+ return -EINVAL;
+
+ icd = kzalloc(sizeof(*icd), GFP_KERNEL);
+ if (!icd)
+ return -ENOMEM;
+
+ icd->iface = icl->bus_id;
+ icd->link = icl;
+ icd->pdev = &pdev->dev;
+ platform_set_drvdata(pdev, icd);
+
+ ret = soc_camera_device_register(icd);
+ if (ret < 0)
+ goto escdevreg;
+
+ icd->user_width = DEFAULT_WIDTH;
+ icd->user_height = DEFAULT_HEIGHT;
+
+ return 0;
+
+escdevreg:
+ kfree(icd);
+
+ return ret;
+}
+
+/*
+ * Only called on rmmod for each platform device, since they are not
+ * hot-pluggable. Now we know that all our users - hosts and devices - have
+ * been unloaded already.
+ */
+static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
+{
+ struct soc_camera_device *icd = platform_get_drvdata(pdev);
+
+ if (!icd)
+ return -EINVAL;
+
+ list_del(&icd->list);
+
+ kfree(icd);
+
+ return 0;
+}
+
+static struct platform_driver __refdata soc_camera_pdrv = {
+ .probe = soc_camera_pdrv_probe,
+ .remove = __devexit_p(soc_camera_pdrv_remove),
+ .driver = {
+ .name = "soc-camera-pdrv",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init soc_camera_init(void)
+{
+ return platform_driver_register(&soc_camera_pdrv);
+}
+
+static void __exit soc_camera_exit(void)
+{
+ platform_driver_unregister(&soc_camera_pdrv);
+}
+
+module_init(soc_camera_init);
+module_exit(soc_camera_exit);
+
+MODULE_DESCRIPTION("Image capture bus driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:soc-camera-pdrv");
diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c
new file mode 100644
index 0000000..7cf7fd1
--- /dev/null
+++ b/drivers/media/platform/soc_camera/soc_camera_platform.c
@@ -0,0 +1,206 @@
+/*
+ * Generic Platform Camera Driver
+ *
+ * Copyright (C) 2008 Magnus Damm
+ * Based on mt9m001 driver,
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-subdev.h>
+#include <media/soc_camera.h>
+#include <media/soc_camera_platform.h>
+
+struct soc_camera_platform_priv {
+ struct v4l2_subdev subdev;
+};
+
+static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+ return container_of(subdev, struct soc_camera_platform_priv, subdev);
+}
+
+static int soc_camera_platform_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+ return p->set_capture(p, enable);
+}
+
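+/*
+ * The "sensor" behind this driver has a single fixed media-bus format,
+ * described in its platform data: try_mbus_fmt, g_mbus_fmt and s_mbus_fmt
+ * all report that same format.
+ */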
+static int soc_camera_platform_fill_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+ mf->width = p->format.width;
+ mf->height = p->format.height;
+ mf->code = p->format.code;
+ mf->colorspace = p->format.colorspace;
+ mf->field = p->format.field;
+
+ return 0;
+}
+
+static int soc_camera_platform_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+ return soc_camera_set_power(p->icd->control, p->icd->link, on);
+}
+
+static struct v4l2_subdev_core_ops platform_subdev_core_ops = {
+ .s_power = soc_camera_platform_s_power,
+};
+
+static int soc_camera_platform_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+ if (index)
+ return -EINVAL;
+
+ *code = p->format.code;
+ return 0;
+}
+
+static int soc_camera_platform_g_crop(struct v4l2_subdev *sd,
+ struct v4l2_crop *a)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+ a->c.left = 0;
+ a->c.top = 0;
+ a->c.width = p->format.width;
+ a->c.height = p->format.height;
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ return 0;
+}
+
+static int soc_camera_platform_cropcap(struct v4l2_subdev *sd,
+ struct v4l2_cropcap *a)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = p->format.width;
+ a->bounds.height = p->format.height;
+ a->defrect = a->bounds;
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+static int soc_camera_platform_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
+
+ cfg->flags = p->mbus_param;
+ cfg->type = p->mbus_type;
+
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops platform_subdev_video_ops = {
+ .s_stream = soc_camera_platform_s_stream,
+ .enum_mbus_fmt = soc_camera_platform_enum_fmt,
+ .cropcap = soc_camera_platform_cropcap,
+ .g_crop = soc_camera_platform_g_crop,
+ .try_mbus_fmt = soc_camera_platform_fill_fmt,
+ .g_mbus_fmt = soc_camera_platform_fill_fmt,
+ .s_mbus_fmt = soc_camera_platform_fill_fmt,
+ .g_mbus_config = soc_camera_platform_g_mbus_config,
+};
+
+static struct v4l2_subdev_ops platform_subdev_ops = {
+ .core = &platform_subdev_core_ops,
+ .video = &platform_subdev_video_ops,
+};
+
+static int soc_camera_platform_probe(struct platform_device *pdev)
+{
+ struct soc_camera_host *ici;
+ struct soc_camera_platform_priv *priv;
+ struct soc_camera_platform_info *p = pdev->dev.platform_data;
+ struct soc_camera_device *icd;
+ int ret;
+
+ if (!p)
+ return -EINVAL;
+
+ if (!p->icd) {
+ dev_err(&pdev->dev,
+ "Platform has not set soc_camera_device pointer!\n");
+ return -EINVAL;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ icd = p->icd;
+
+ /* soc-camera convention: control's drvdata points to the subdev */
+ platform_set_drvdata(pdev, &priv->subdev);
+ /* Set the control device reference */
+ icd->control = &pdev->dev;
+
+ ici = to_soc_camera_host(icd->parent);
+
+ v4l2_subdev_init(&priv->subdev, &platform_subdev_ops);
+ v4l2_set_subdevdata(&priv->subdev, p);
+ strlcpy(priv->subdev.name, dev_name(&pdev->dev), V4L2_SUBDEV_NAME_SIZE);
+
+ ret = v4l2_device_register_subdev(&ici->v4l2_dev, &priv->subdev);
+ if (ret)
+ goto evdrs;
+
+ return ret;
+
+evdrs:
+ platform_set_drvdata(pdev, NULL);
+ kfree(priv);
+ return ret;
+}
+
+static int soc_camera_platform_remove(struct platform_device *pdev)
+{
+ struct soc_camera_platform_priv *priv = get_priv(pdev);
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(&priv->subdev);
+
+ p->icd->control = NULL;
+ v4l2_device_unregister_subdev(&priv->subdev);
+ platform_set_drvdata(pdev, NULL);
+ kfree(priv);
+ return 0;
+}
+
+static struct platform_driver soc_camera_platform_driver = {
+ .driver = {
+ .name = "soc_camera_platform",
+ .owner = THIS_MODULE,
+ },
+ .probe = soc_camera_platform_probe,
+ .remove = soc_camera_platform_remove,
+};
+
+module_platform_driver(soc_camera_platform_driver);
+
+MODULE_DESCRIPTION("SoC Camera Platform driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:soc_camera_platform");
diff --git a/drivers/media/platform/soc_camera/soc_mediabus.c b/drivers/media/platform/soc_camera/soc_mediabus.c
new file mode 100644
index 0000000..a397812
--- /dev/null
+++ b/drivers/media/platform/soc_camera/soc_mediabus.c
@@ -0,0 +1,493 @@
+/*
+ * soc-camera media bus helper routines
+ *
+ * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/soc_mediabus.h>
+
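+/*
+ * Lookup table describing, for each supported media-bus pixel code, the
+ * fourcc, bits per sample, packing, byte order and layout seen on the host
+ * data bus. soc_mbus_get_fmtdesc() below searches this table.
+ */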
+static const struct soc_mbus_lookup mbus_fmt[] = {
+{
+ .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .name = "YVYU",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .name = "UYVY",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .name = "VYUY",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .name = "RGB555",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB555X,
+ .name = "RGB555X",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .name = "RGB565",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .name = "RGB565X",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SBGGR8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .name = "Bayer 8 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SBGGR10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 10,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_Y8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .name = "Grey",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_Y10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .name = "Grey 10bit",
+ .bits_per_sample = 10,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADLO,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADLO,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_JPEG_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .name = "JPEG",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_VARIABLE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB444,
+ .name = "RGB444",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_YUYV8_1_5X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .name = "YUYV 4:2:0",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_1_5X8,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_YVYU8_1_5X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .name = "YVYU 4:2:0",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_1_5X8,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_UYVY8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .name = "UYVY 16bit",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_VYUY8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .name = "VYUY 16bit",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_YUYV8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV 16bit",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_YVYU8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .name = "YVYU 16bit",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SGRBG8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .name = "Bayer 8 GRBG",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG10DPCM8,
+ .name = "Bayer 10 BGGR DPCM 8",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SGBRG10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .name = "Bayer 10 GBRG",
+ .bits_per_sample = 10,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .name = "Bayer 10 GRBG",
+ .bits_per_sample = 10,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SRGGB10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .name = "Bayer 10 RGGB",
+ .bits_per_sample = 10,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SBGGR12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .name = "Bayer 12 BGGR",
+ .bits_per_sample = 12,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SGBRG12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .name = "Bayer 12 GBRG",
+ .bits_per_sample = 12,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .name = "Bayer 12 GRBG",
+ .bits_per_sample = 12,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = V4L2_MBUS_FMT_SRGGB12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .name = "Bayer 12 RGGB",
+ .bits_per_sample = 12,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+},
+};
+
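+/*
+ * Bus samples per pixel, returned as the *numerator / *denominator
+ * fraction. For example, the 2X8 packings transfer two 8-bit samples per
+ * pixel (2/1), the 1.5X8 YUV 4:2:0 packings average three samples per two
+ * pixels (3/2), and variable-length formats such as JPEG report 0/1.
+ */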
+int soc_mbus_samples_per_pixel(const struct soc_mbus_pixelfmt *mf,
+ unsigned int *numerator, unsigned int *denominator)
+{
+ switch (mf->packing) {
+ case SOC_MBUS_PACKING_NONE:
+ case SOC_MBUS_PACKING_EXTEND16:
+ *numerator = 1;
+ *denominator = 1;
+ return 0;
+ case SOC_MBUS_PACKING_2X8_PADHI:
+ case SOC_MBUS_PACKING_2X8_PADLO:
+ *numerator = 2;
+ *denominator = 1;
+ return 0;
+ case SOC_MBUS_PACKING_1_5X8:
+ *numerator = 3;
+ *denominator = 2;
+ return 0;
+ case SOC_MBUS_PACKING_VARIABLE:
+ *numerator = 0;
+ *denominator = 1;
+ return 0;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL(soc_mbus_samples_per_pixel);
+
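+/*
+ * Bytes needed for one line of @width pixels on the host side. E.g. a
+ * 640 pixel wide SBGGR10 line with SOC_MBUS_PACKING_EXTEND16 occupies
+ * 640 * 2 = 1280 bytes, while variable-length formats (JPEG) return 0.
+ */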
+s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
+{
+ if (mf->fourcc == V4L2_PIX_FMT_JPEG)
+ return 0;
+
+ if (mf->layout != SOC_MBUS_LAYOUT_PACKED)
+ return width * mf->bits_per_sample / 8;
+
+ switch (mf->packing) {
+ case SOC_MBUS_PACKING_NONE:
+ return width * mf->bits_per_sample / 8;
+ case SOC_MBUS_PACKING_2X8_PADHI:
+ case SOC_MBUS_PACKING_2X8_PADLO:
+ case SOC_MBUS_PACKING_EXTEND16:
+ return width * 2;
+ case SOC_MBUS_PACKING_1_5X8:
+ return width * 3 / 2;
+ case SOC_MBUS_PACKING_VARIABLE:
+ return 0;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL(soc_mbus_bytes_per_line);
+
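+/*
+ * Total image size in bytes for @height lines: packed layouts take
+ * bytes_per_line * height, while for the planar layouts the factor follows
+ * the packing - x2 for the padded 2X8 variants, x3/2 for the 1.5X8
+ * YUV 4:2:0 variants.
+ */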
+s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
+ u32 bytes_per_line, u32 height)
+{
+ if (mf->fourcc == V4L2_PIX_FMT_JPEG)
+ return 0;
+
+ if (mf->layout == SOC_MBUS_LAYOUT_PACKED)
+ return bytes_per_line * height;
+
+ switch (mf->packing) {
+ case SOC_MBUS_PACKING_2X8_PADHI:
+ case SOC_MBUS_PACKING_2X8_PADLO:
+ return bytes_per_line * height * 2;
+ case SOC_MBUS_PACKING_1_5X8:
+ return bytes_per_line * height * 3 / 2;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(soc_mbus_image_size);
+
+const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
+ enum v4l2_mbus_pixelcode code,
+ const struct soc_mbus_lookup *lookup,
+ int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ if (lookup[i].code == code)
+ return &lookup[i].fmt;
+
+ return NULL;
+}
+EXPORT_SYMBOL(soc_mbus_find_fmtdesc);
+
+const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
+ enum v4l2_mbus_pixelcode code)
+{
+ return soc_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt));
+}
+EXPORT_SYMBOL(soc_mbus_get_fmtdesc);
+
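+/*
+ * Intersect the flags advertised by the camera and the host and verify
+ * that every mandatory group still has at least one option left: sync
+ * polarities, pixel-clock edge, data polarity and master/slave mode for
+ * parallel buses (BT.656 skips the sync checks), lane count and clock mode
+ * for CSI-2. Returns the common flags, or 0 if the two ends are
+ * incompatible.
+ */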
+unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg,
+ unsigned int flags)
+{
+ unsigned long common_flags;
+ bool hsync = true, vsync = true, pclk, data, mode;
+ bool mipi_lanes, mipi_clock;
+
+ common_flags = cfg->flags & flags;
+
+ switch (cfg->type) {
+ case V4L2_MBUS_PARALLEL:
+ hsync = common_flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_LOW);
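+ /* fall through - the remaining checks are common to parallel and BT.656 */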
+ case V4L2_MBUS_BT656:
+ pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING);
+ data = common_flags & (V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_LOW);
+ mode = common_flags & (V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE);
+ return (!hsync || !vsync || !pclk || !data || !mode) ?
+ 0 : common_flags;
+ case V4L2_MBUS_CSI2:
+ mipi_lanes = common_flags & V4L2_MBUS_CSI2_LANES;
+ mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK |
+ V4L2_MBUS_CSI2_CONTINUOUS_CLOCK);
+ return (!mipi_lanes || !mipi_clock) ? 0 : common_flags;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(soc_mbus_config_compatible);
+
+static int __init soc_mbus_init(void)
+{
+ return 0;
+}
+
+static void __exit soc_mbus_exit(void)
+{
+}
+
+module_init(soc_mbus_init);
+module_exit(soc_mbus_exit);
+
+MODULE_DESCRIPTION("soc-camera media bus interface");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");