path: root/drivers/staging/fsl_rman
author    Minghuan Lian <Minghuan.Lian@freescale.com>  2014-05-06 17:26:31 (GMT)
committer Jose Rivera <German.Rivera@freescale.com>    2014-05-07 16:24:43 (GMT)
commit    0130650762eeb3d58f7d1dfae2d6499a66eb0059 (patch)
tree      9353c1bc95cba1f3be573a9cbfc4944f1a811eef /drivers/staging/fsl_rman
parent    0d4f80cf2446f15650703d775b2dfc2328a71cac (diff)
download  linux-fsl-qoriq-0130650762eeb3d58f7d1dfae2d6499a66eb0059.tar.xz
fsl/RMan: add RMan private mmap function
The latest kernel UIO mmap() cannot map a physical address that is not page aligned; however, the RMan global registers are not page aligned, and an inbound block's register region is also smaller than a page. This patch therefore provides a private mmap function to support mapping the RMan registers.

Signed-off-by: Minghuan Lian <Minghuan.Lian@freescale.com>
Change-Id: Idfff04bb0cfa418b3ccb07448ad31a75d8f04ca7
Reviewed-on: http://git.am.freescale.net:8181/11886
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Gang Liu <Gang.Liu@freescale.com>
Reviewed-by: Tiefei Zang <tie-fei.zang@freescale.com>
Reviewed-by: Jose Rivera <German.Rivera@freescale.com>
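For context, a minimal standalone sketch of the alignment arithmetic involved (the register address used below is purely illustrative, not taken from the patch or the RMan documentation): a mapping can only begin on a page boundary, so a non-page-aligned register block has to be exported by mapping the page that contains it, leaving the caller to apply the remaining in-page offset.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_4K 4096UL

int main(void)
{
	/* Hypothetical, non-page-aligned register base (illustrative value only). */
	uint64_t reg_addr = 0xfe0b0f00ULL;

	uint64_t page_base   = reg_addr & ~(PAGE_SIZE_4K - 1); /* page holding the block */
	uint64_t in_page_off = reg_addr &  (PAGE_SIZE_4K - 1); /* offset inside that page */

	/* mmap() can only hand out page_base; callers add in_page_off themselves. */
	printf("page base   : %#llx\n", (unsigned long long)page_base);
	printf("in-page off : %#llx\n", (unsigned long long)in_page_off);
	return 0;
}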
Diffstat (limited to 'drivers/staging/fsl_rman')
-rw-r--r--  drivers/staging/fsl_rman/rman_uio_driver.c  30
1 file changed, 30 insertions, 0 deletions
diff --git a/drivers/staging/fsl_rman/rman_uio_driver.c b/drivers/staging/fsl_rman/rman_uio_driver.c
index 4859a5f..e5c0d87 100644
--- a/drivers/staging/fsl_rman/rman_uio_driver.c
+++ b/drivers/staging/fsl_rman/rman_uio_driver.c
@@ -27,6 +27,7 @@
#include <linux/uio_driver.h>
#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/mm.h>
static const char rman_uio_version[] = "RMan UIO driver v1.0";
@@ -70,6 +71,33 @@ static int rman_uio_open(struct uio_info *info, struct inode *inode)
return 0;
}
+static int rman_uio_mmap(struct uio_info *info, struct vm_area_struct *vma)
+{
+	int mi;
+	struct uio_mem *mem;
+	unsigned long size;
+
+	if (vma->vm_pgoff < MAX_UIO_MAPS) {
+		if (info->mem[vma->vm_pgoff].size == 0)
+			return -EINVAL;
+		mi = (int)vma->vm_pgoff;
+	} else
+		return -EINVAL;
+
+	mem = &info->mem[mi];
+
+	size = min(vma->vm_end - vma->vm_start, mem->size);
+	size = max(size, PAGE_SIZE);
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return remap_pfn_range(vma,
+			       vma->vm_start,
+			       mem->addr >> PAGE_SHIFT,
+			       size,
+			       vma->vm_page_prot);
+}
+
static int rman_uio_release(struct uio_info *info, struct inode *inode)
{
struct rman_uio_info *i = container_of(info, struct rman_uio_info, uio);
@@ -111,6 +139,7 @@ static int rman_uio_init(struct rman_dev *rmdev)
info->uio.handler = rman_uio_irq_handler;
info->uio.open = rman_uio_open;
info->uio.release = rman_uio_release;
+ info->uio.mmap = rman_uio_mmap;
info->uio.priv = rmdev;
ret = uio_register_device(rmdev->dev, &info->uio);
if (ret) {
@@ -135,6 +164,7 @@ static int rman_ib_uio_init(struct rman_inbound_block *ib)
info->uio.mem[0].memtype = UIO_MEM_PHYS;
info->uio.open = rman_uio_open;
info->uio.release = rman_uio_release;
+ info->uio.mmap = rman_uio_mmap;
info->uio.priv = ib;
ret = uio_register_device(ib->dev, &info->uio);
if (ret) {
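A usage sketch for the new mmap hook (the device node /dev/uio0, map index 0, and the sysfs path below are assumptions standing in for whatever RMan UIO device is present; they are not part of the patch): userspace maps the page backing a register block through the UIO device, reads the block's physical address from sysfs, and adds the in-page offset before touching the registers.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	unsigned long long addr;
	void *base;
	FILE *f;
	int fd;

	/* Physical address of UIO map 0, as exported by the UIO core (path assumed). */
	f = fopen("/sys/class/uio/uio0/maps/map0/addr", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%llx", &addr) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	fd = open("/dev/uio0", O_RDWR);
	if (fd < 0)
		return 1;

	/* UIO convention: the mmap offset selects the map index, N * page size. */
	base = mmap(NULL, pg, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 * pg);
	if (base == MAP_FAILED)
		return 1;

	/* The mapping starts at the containing page; add the in-page offset. */
	volatile uint32_t *regs =
		(volatile uint32_t *)((char *)base + (addr & (pg - 1)));
	printf("first register word: 0x%08x\n", *regs);

	munmap(base, pg);
	close(fd);
	return 0;
}

Deriving the offset from the exported addr attribute keeps the application independent of where inside the page the RMan registers actually sit.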