Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/Kconfig           |   14
-rw-r--r--  drivers/iommu/amd_iommu.c       |    8
-rw-r--r--  drivers/iommu/amd_iommu_v2.c    |  184
-rw-r--r--  drivers/iommu/arm-smmu.c        |    4
-rw-r--r--  drivers/iommu/exynos-iommu.c    | 1052
-rw-r--r--  drivers/iommu/fsl_pamu.c        |    3
-rw-r--r--  drivers/iommu/msm_iommu_dev.c   |   38
-rw-r--r--  drivers/iommu/omap-iommu.c      |   31
-rw-r--r--  drivers/iommu/omap-iopgtable.h  |    3
-rw-r--r--  drivers/iommu/shmobile-ipmmu.c  |   20
10 files changed, 732 insertions, 625 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index a22b537..d260605 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -178,13 +178,13 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
- depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU
+ depends on ARCH_EXYNOS
select IOMMU_API
help
- Support for the IOMMU(System MMU) of Samsung Exynos application
- processor family. This enables H/W multimedia accellerators to see
- non-linear physical memory chunks as a linear memory in their
- address spaces
+ Support for the IOMMU (System MMU) of Samsung Exynos application
+ processor family. This enables H/W multimedia accelerators to see
+ non-linear physical memory chunks as linear memory in their
+ address space.
If unsure, say N here.
@@ -193,9 +193,9 @@ config EXYNOS_IOMMU_DEBUG
depends on EXYNOS_IOMMU
help
Select this to see the detailed log message that shows what
- happens in the IOMMU driver
+ happens in the IOMMU driver.
- Say N unless you need kernel log message for IOMMU debugging
+ Say N unless you need kernel log message for IOMMU debugging.
config SHMOBILE_IPMMU
bool
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 57068e8..4aec6a2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3499,8 +3499,6 @@ int __init amd_iommu_init_passthrough(void)
{
struct iommu_dev_data *dev_data;
struct pci_dev *dev = NULL;
- struct amd_iommu *iommu;
- u16 devid;
int ret;
ret = alloc_passthrough_domain();
@@ -3514,12 +3512,6 @@ int __init amd_iommu_init_passthrough(void)
dev_data = get_dev_data(&dev->dev);
dev_data->passthrough = true;
- devid = get_device_id(&dev->dev);
-
- iommu = amd_iommu_rlookup_table[devid];
- if (!iommu)
- continue;
-
attach_device(&dev->dev, pt_domain);
}
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 203b2e6..d4daa05 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -45,6 +45,8 @@ struct pri_queue {
struct pasid_state {
struct list_head list; /* For global state-list */
atomic_t count; /* Reference count */
+ atomic_t mmu_notifier_count; /* Counting nested mmu_notifier
+ calls */
struct task_struct *task; /* Task bound to this PASID */
struct mm_struct *mm; /* mm_struct for the faults */
struct mmu_notifier mn; /* mmu_otifier handle */
@@ -56,6 +58,8 @@ struct pasid_state {
};
struct device_state {
+ struct list_head list;
+ u16 devid;
atomic_t count;
struct pci_dev *pdev;
struct pasid_state **states;
@@ -81,13 +85,9 @@ struct fault {
u16 flags;
};
-static struct device_state **state_table;
+static LIST_HEAD(state_list);
static spinlock_t state_lock;
-/* List and lock for all pasid_states */
-static LIST_HEAD(pasid_state_list);
-static DEFINE_SPINLOCK(ps_lock);
-
static struct workqueue_struct *iommu_wq;
/*
@@ -99,7 +99,6 @@ static u64 *empty_page_table;
static void free_pasid_states(struct device_state *dev_state);
static void unbind_pasid(struct device_state *dev_state, int pasid);
-static int task_exit(struct notifier_block *nb, unsigned long e, void *data);
static u16 device_id(struct pci_dev *pdev)
{
@@ -111,13 +110,25 @@ static u16 device_id(struct pci_dev *pdev)
return devid;
}
+static struct device_state *__get_device_state(u16 devid)
+{
+ struct device_state *dev_state;
+
+ list_for_each_entry(dev_state, &state_list, list) {
+ if (dev_state->devid == devid)
+ return dev_state;
+ }
+
+ return NULL;
+}
+
static struct device_state *get_device_state(u16 devid)
{
struct device_state *dev_state;
unsigned long flags;
spin_lock_irqsave(&state_lock, flags);
- dev_state = state_table[devid];
+ dev_state = __get_device_state(devid);
if (dev_state != NULL)
atomic_inc(&dev_state->count);
spin_unlock_irqrestore(&state_lock, flags);
@@ -158,29 +169,6 @@ static void put_device_state_wait(struct device_state *dev_state)
free_device_state(dev_state);
}
-static struct notifier_block profile_nb = {
- .notifier_call = task_exit,
-};
-
-static void link_pasid_state(struct pasid_state *pasid_state)
-{
- spin_lock(&ps_lock);
- list_add_tail(&pasid_state->list, &pasid_state_list);
- spin_unlock(&ps_lock);
-}
-
-static void __unlink_pasid_state(struct pasid_state *pasid_state)
-{
- list_del(&pasid_state->list);
-}
-
-static void unlink_pasid_state(struct pasid_state *pasid_state)
-{
- spin_lock(&ps_lock);
- __unlink_pasid_state(pasid_state);
- spin_unlock(&ps_lock);
-}
-
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
int pasid, bool alloc)
@@ -337,7 +325,6 @@ static void unbind_pasid(struct device_state *dev_state, int pasid)
if (pasid_state == NULL)
return;
- unlink_pasid_state(pasid_state);
__unbind_pasid(pasid_state);
put_pasid_state_wait(pasid_state); /* Reference taken in this function */
}
@@ -379,7 +366,12 @@ static void free_pasid_states(struct device_state *dev_state)
continue;
put_pasid_state(pasid_state);
- unbind_pasid(dev_state, i);
+
+ /*
+ * This will call the mn_release function and
+ * unbind the PASID
+ */
+ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
}
if (dev_state->pasid_levels == 2)
@@ -443,8 +435,11 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn,
pasid_state = mn_to_state(mn);
dev_state = pasid_state->device_state;
- amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
- __pa(empty_page_table));
+ if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) {
+ amd_iommu_domain_set_gcr3(dev_state->domain,
+ pasid_state->pasid,
+ __pa(empty_page_table));
+ }
}
static void mn_invalidate_range_end(struct mmu_notifier *mn,
@@ -457,11 +452,31 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn,
pasid_state = mn_to_state(mn);
dev_state = pasid_state->device_state;
- amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
- __pa(pasid_state->mm->pgd));
+ if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) {
+ amd_iommu_domain_set_gcr3(dev_state->domain,
+ pasid_state->pasid,
+ __pa(pasid_state->mm->pgd));
+ }
+}
+
+static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+
+ might_sleep();
+
+ pasid_state = mn_to_state(mn);
+ dev_state = pasid_state->device_state;
+
+ if (pasid_state->device_state->inv_ctx_cb)
+ dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
+
+ unbind_pasid(dev_state, pasid_state->pasid);
}
static struct mmu_notifier_ops iommu_mn = {
+ .release = mn_release,
.clear_flush_young = mn_clear_flush_young,
.change_pte = mn_change_pte,
.invalidate_page = mn_invalidate_page,
@@ -606,53 +621,6 @@ static struct notifier_block ppr_nb = {
.notifier_call = ppr_notifier,
};
-static int task_exit(struct notifier_block *nb, unsigned long e, void *data)
-{
- struct pasid_state *pasid_state;
- struct task_struct *task;
-
- task = data;
-
- /*
- * Using this notifier is a hack - but there is no other choice
- * at the moment. What I really want is a sleeping notifier that
- * is called when an MM goes down. But such a notifier doesn't
- * exist yet. The notifier needs to sleep because it has to make
- * sure that the device does not use the PASID and the address
- * space anymore before it is destroyed. This includes waiting
- * for pending PRI requests to pass the workqueue. The
- * MMU-Notifiers would be a good fit, but they use RCU and so
- * they are not allowed to sleep. Lets see how we can solve this
- * in a more intelligent way in the future.
- */
-again:
- spin_lock(&ps_lock);
- list_for_each_entry(pasid_state, &pasid_state_list, list) {
- struct device_state *dev_state;
- int pasid;
-
- if (pasid_state->task != task)
- continue;
-
- /* Drop Lock and unbind */
- spin_unlock(&ps_lock);
-
- dev_state = pasid_state->device_state;
- pasid = pasid_state->pasid;
-
- if (pasid_state->device_state->inv_ctx_cb)
- dev_state->inv_ctx_cb(dev_state->pdev, pasid);
-
- unbind_pasid(dev_state, pasid);
-
- /* Task may be in the list multiple times */
- goto again;
- }
- spin_unlock(&ps_lock);
-
- return NOTIFY_OK;
-}
-
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
struct task_struct *task)
{
@@ -682,6 +650,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
goto out;
atomic_set(&pasid_state->count, 1);
+ atomic_set(&pasid_state->mmu_notifier_count, 0);
init_waitqueue_head(&pasid_state->wq);
spin_lock_init(&pasid_state->lock);
@@ -705,8 +674,6 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
if (ret)
goto out_clear_state;
- link_pasid_state(pasid_state);
-
return 0;
out_clear_state:
@@ -727,6 +694,7 @@ EXPORT_SYMBOL(amd_iommu_bind_pasid);
void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
+ struct pasid_state *pasid_state;
struct device_state *dev_state;
u16 devid;
@@ -743,7 +711,17 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
if (pasid < 0 || pasid >= dev_state->max_pasids)
goto out;
- unbind_pasid(dev_state, pasid);
+ pasid_state = get_pasid_state(dev_state, pasid);
+ if (pasid_state == NULL)
+ goto out;
+ /*
+ * Drop reference taken here. We are safe because we still hold
+ * the reference taken in the amd_iommu_bind_pasid function.
+ */
+ put_pasid_state(pasid_state);
+
+ /* This will call the mn_release function and unbind the PASID */
+ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
out:
put_device_state(dev_state);
@@ -773,7 +751,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
spin_lock_init(&dev_state->lock);
init_waitqueue_head(&dev_state->wq);
- dev_state->pdev = pdev;
+ dev_state->pdev = pdev;
+ dev_state->devid = devid;
tmp = pasids;
for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
@@ -803,13 +782,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
spin_lock_irqsave(&state_lock, flags);
- if (state_table[devid] != NULL) {
+ if (__get_device_state(devid) != NULL) {
spin_unlock_irqrestore(&state_lock, flags);
ret = -EBUSY;
goto out_free_domain;
}
- state_table[devid] = dev_state;
+ list_add_tail(&dev_state->list, &state_list);
spin_unlock_irqrestore(&state_lock, flags);
@@ -841,13 +820,13 @@ void amd_iommu_free_device(struct pci_dev *pdev)
spin_lock_irqsave(&state_lock, flags);
- dev_state = state_table[devid];
+ dev_state = __get_device_state(devid);
if (dev_state == NULL) {
spin_unlock_irqrestore(&state_lock, flags);
return;
}
- state_table[devid] = NULL;
+ list_del(&dev_state->list);
spin_unlock_irqrestore(&state_lock, flags);
@@ -874,7 +853,7 @@ int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
- dev_state = state_table[devid];
+ dev_state = __get_device_state(devid);
if (dev_state == NULL)
goto out_unlock;
@@ -905,7 +884,7 @@ int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
- dev_state = state_table[devid];
+ dev_state = __get_device_state(devid);
if (dev_state == NULL)
goto out_unlock;
@@ -922,7 +901,6 @@ EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
static int __init amd_iommu_v2_init(void)
{
- size_t state_table_size;
int ret;
pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
@@ -938,16 +916,10 @@ static int __init amd_iommu_v2_init(void)
spin_lock_init(&state_lock);
- state_table_size = MAX_DEVICES * sizeof(struct device_state *);
- state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(state_table_size));
- if (state_table == NULL)
- return -ENOMEM;
-
ret = -ENOMEM;
iommu_wq = create_workqueue("amd_iommu_v2");
if (iommu_wq == NULL)
- goto out_free;
+ goto out;
ret = -ENOMEM;
empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
@@ -955,29 +927,24 @@ static int __init amd_iommu_v2_init(void)
goto out_destroy_wq;
amd_iommu_register_ppr_notifier(&ppr_nb);
- profile_event_register(PROFILE_TASK_EXIT, &profile_nb);
return 0;
out_destroy_wq:
destroy_workqueue(iommu_wq);
-out_free:
- free_pages((unsigned long)state_table, get_order(state_table_size));
-
+out:
return ret;
}
static void __exit amd_iommu_v2_exit(void)
{
struct device_state *dev_state;
- size_t state_table_size;
int i;
if (!amd_iommu_v2_supported())
return;
- profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
amd_iommu_unregister_ppr_notifier(&ppr_nb);
flush_workqueue(iommu_wq);
@@ -1000,9 +967,6 @@ static void __exit amd_iommu_v2_exit(void)
destroy_workqueue(iommu_wq);
- state_table_size = MAX_DEVICES * sizeof(struct device_state *);
- free_pages((unsigned long)state_table, get_order(state_table_size));
-
free_page((unsigned long)empty_page_table);
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 647c3c7..1599354 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1167,7 +1167,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
for (i = 0; i < master->num_streamids; ++i) {
u32 idx, s2cr;
idx = master->smrs ? master->smrs[i].idx : master->streamids[i];
- s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) |
+ s2cr = S2CR_TYPE_TRANS |
(smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
}
@@ -1804,7 +1804,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
* allocation (PTRS_PER_PGD).
*/
#ifdef CONFIG_64BIT
- smmu->s1_output_size = min(39UL, size);
+ smmu->s1_output_size = min((unsigned long)VA_BITS, size);
#else
smmu->s1_output_size = min(32UL, size);
#endif
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 0740189..99054d2 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -29,7 +29,8 @@
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
-#include <mach/sysmmu.h>
+typedef u32 sysmmu_iova_t;
+typedef u32 sysmmu_pte_t;
/* We does not consider super section mapping (16MB) */
#define SECT_ORDER 20
@@ -44,28 +45,44 @@
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
-#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
-#define lv1ent_page(sent) ((*(sent) & 3) == 1)
+#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
+ ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
+#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
+#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
+#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
+ ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)
#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
+static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
+{
+ return iova & (size - 1);
+}
+
#define section_phys(sent) (*(sent) & SECT_MASK)
-#define section_offs(iova) ((iova) & 0xFFFFF)
+#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
-#define lpage_offs(iova) ((iova) & 0xFFFF)
+#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
-#define spage_offs(iova) ((iova) & 0xFFF)
-
-#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
-#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
+#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)
#define NUM_LV1ENTRIES 4096
-#define NUM_LV2ENTRIES 256
+#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
+
+static u32 lv1ent_offset(sysmmu_iova_t iova)
+{
+ return iova >> SECT_ORDER;
+}
+
+static u32 lv2ent_offset(sysmmu_iova_t iova)
+{
+ return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
+}
-#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
+#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
@@ -80,6 +97,13 @@
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0
+#define CFG_LRU 0x1
+#define CFG_QOS(n) ((n & 0xF) << 7)
+#define CFG_MASK 0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
+#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
+#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
+#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */
+
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
@@ -96,19 +120,32 @@
#define REG_MMU_VERSION 0x034
+#define MMU_MAJ_VER(val) ((val) >> 7)
+#define MMU_MIN_VER(val) ((val) & 0x7F)
+#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
+
+#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
+
#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058
-static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
+#define has_sysmmu(dev) (dev->archdata.iommu != NULL)
+
+static struct kmem_cache *lv2table_kmem_cache;
+static sysmmu_pte_t *zero_lv2_table;
+#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
+
+static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
return pgtable + lv1ent_offset(iova);
}
-static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
+static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
- return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
+ return (sysmmu_pte_t *)phys_to_virt(
+ lv2table_base(sent)) + lv2ent_offset(iova);
}
enum exynos_sysmmu_inttype {
@@ -124,16 +161,6 @@ enum exynos_sysmmu_inttype {
SYSMMU_FAULTS_NUM
};
-/*
- * @itype: type of fault.
- * @pgtable_base: the physical address of page table base. This is 0 if @itype
- * is SYSMMU_BUSERROR.
- * @fault_addr: the device (virtual) address that the System MMU tried to
- * translated. This is 0 if @itype is SYSMMU_BUSERROR.
- */
-typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
- unsigned long pgtable_base, unsigned long fault_addr);
-
static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
REG_PAGE_FAULT_ADDR,
REG_AR_FAULT_ADDR,
@@ -157,27 +184,34 @@ static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
"UNKNOWN FAULT"
};
+/* attached to dev.archdata.iommu of the master device */
+struct exynos_iommu_owner {
+ struct list_head client; /* entry of exynos_iommu_domain.clients */
+ struct device *dev;
+ struct device *sysmmu;
+ struct iommu_domain *domain;
+ void *vmm_data; /* IO virtual memory manager's data */
+ spinlock_t lock; /* Lock to preserve consistency of System MMU */
+};
+
struct exynos_iommu_domain {
struct list_head clients; /* list of sysmmu_drvdata.node */
- unsigned long *pgtable; /* lv1 page table, 16KB */
+ sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
short *lv2entcnt; /* free lv2 entry counter for each section */
spinlock_t lock; /* lock for this structure */
spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};
struct sysmmu_drvdata {
- struct list_head node; /* entry of exynos_iommu_domain.clients */
struct device *sysmmu; /* System MMU's device descriptor */
- struct device *dev; /* Owner of system MMU */
- char *dbgname;
- int nsfrs;
- void __iomem **sfrbases;
- struct clk *clk[2];
+ struct device *master; /* Owner of system MMU */
+ void __iomem *sfrbase;
+ struct clk *clk;
+ struct clk *clk_master;
int activations;
- rwlock_t lock;
+ spinlock_t lock;
struct iommu_domain *domain;
- sysmmu_fault_handler_t fault_handler;
- unsigned long pgtable;
+ phys_addr_t pgtable;
};
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
@@ -204,6 +238,11 @@ static void sysmmu_unblock(void __iomem *sfrbase)
__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}
+static unsigned int __raw_sysmmu_version(struct sysmmu_drvdata *data)
+{
+ return MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
+}
+
static bool sysmmu_block(void __iomem *sfrbase)
{
int i = 120;
@@ -226,434 +265,428 @@ static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
}
static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
- unsigned long iova)
+ sysmmu_iova_t iova, unsigned int num_inv)
{
- __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
+ unsigned int i;
+
+ for (i = 0; i < num_inv; i++) {
+ __raw_writel((iova & SPAGE_MASK) | 1,
+ sfrbase + REG_MMU_FLUSH_ENTRY);
+ iova += SPAGE_SIZE;
+ }
}
static void __sysmmu_set_ptbase(void __iomem *sfrbase,
- unsigned long pgd)
+ phys_addr_t pgd)
{
- __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
__sysmmu_tlb_invalidate(sfrbase);
}
-static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
- unsigned long size, int idx)
-{
- __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
- __raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
-}
-
-static void __set_fault_handler(struct sysmmu_drvdata *data,
- sysmmu_fault_handler_t handler)
-{
- unsigned long flags;
-
- write_lock_irqsave(&data->lock, flags);
- data->fault_handler = handler;
- write_unlock_irqrestore(&data->lock, flags);
-}
-
-void exynos_sysmmu_set_fault_handler(struct device *dev,
- sysmmu_fault_handler_t handler)
-{
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
-
- __set_fault_handler(data, handler);
-}
-
-static int default_fault_handler(enum exynos_sysmmu_inttype itype,
- unsigned long pgtable_base, unsigned long fault_addr)
+static void show_fault_information(const char *name,
+ enum exynos_sysmmu_inttype itype,
+ phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
- unsigned long *ent;
+ sysmmu_pte_t *ent;
if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
itype = SYSMMU_FAULT_UNKNOWN;
- pr_err("%s occurred at 0x%lx(Page table base: 0x%lx)\n",
- sysmmu_fault_name[itype], fault_addr, pgtable_base);
+ pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
+ sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);
- ent = section_entry(__va(pgtable_base), fault_addr);
- pr_err("\tLv1 entry: 0x%lx\n", *ent);
+ ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
+ pr_err("\tLv1 entry: %#x\n", *ent);
if (lv1ent_page(ent)) {
ent = page_entry(ent, fault_addr);
- pr_err("\t Lv2 entry: 0x%lx\n", *ent);
+ pr_err("\t Lv2 entry: %#x\n", *ent);
}
-
- pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
-
- BUG();
-
- return 0;
}
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
/* SYSMMU is in blocked when interrupt occurred. */
struct sysmmu_drvdata *data = dev_id;
- struct resource *irqres;
- struct platform_device *pdev;
enum exynos_sysmmu_inttype itype;
- unsigned long addr = -1;
-
- int i, ret = -ENOSYS;
-
- read_lock(&data->lock);
+ sysmmu_iova_t addr = -1;
+ int ret = -ENOSYS;
WARN_ON(!is_sysmmu_active(data));
- pdev = to_platform_device(data->sysmmu);
- for (i = 0; i < (pdev->num_resources / 2); i++) {
- irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (irqres && ((int)irqres->start == irq))
- break;
- }
+ spin_lock(&data->lock);
- if (i == pdev->num_resources) {
+ if (!IS_ERR(data->clk_master))
+ clk_enable(data->clk_master);
+
+ itype = (enum exynos_sysmmu_inttype)
+ __ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
+ if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
itype = SYSMMU_FAULT_UNKNOWN;
+ else
+ addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);
+
+ if (itype == SYSMMU_FAULT_UNKNOWN) {
+ pr_err("%s: Fault is not occurred by System MMU '%s'!\n",
+ __func__, dev_name(data->sysmmu));
+ pr_err("%s: Please check if IRQ is correctly configured.\n",
+ __func__);
+ BUG();
} else {
- itype = (enum exynos_sysmmu_inttype)
- __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
- if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
- itype = SYSMMU_FAULT_UNKNOWN;
- else
- addr = __raw_readl(
- data->sfrbases[i] + fault_reg_offset[itype]);
+ unsigned int base =
+ __raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
+ show_fault_information(dev_name(data->sysmmu),
+ itype, base, addr);
+ if (data->domain)
+ ret = report_iommu_fault(data->domain,
+ data->master, addr, itype);
}
- if (data->domain)
- ret = report_iommu_fault(data->domain, data->dev,
- addr, itype);
+ /* fault is not recovered by fault handler */
+ BUG_ON(ret != 0);
- if ((ret == -ENOSYS) && data->fault_handler) {
- unsigned long base = data->pgtable;
- if (itype != SYSMMU_FAULT_UNKNOWN)
- base = __raw_readl(
- data->sfrbases[i] + REG_PT_BASE_ADDR);
- ret = data->fault_handler(itype, base, addr);
- }
+ __raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);
- if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
- __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
- else
- dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
- data->dbgname, sysmmu_fault_name[itype]);
+ sysmmu_unblock(data->sfrbase);
- if (itype != SYSMMU_FAULT_UNKNOWN)
- sysmmu_unblock(data->sfrbases[i]);
+ if (!IS_ERR(data->clk_master))
+ clk_disable(data->clk_master);
- read_unlock(&data->lock);
+ spin_unlock(&data->lock);
return IRQ_HANDLED;
}
-static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
+static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
+ if (!IS_ERR(data->clk_master))
+ clk_enable(data->clk_master);
+
+ __raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
+ __raw_writel(0, data->sfrbase + REG_MMU_CFG);
+
+ clk_disable(data->clk);
+ if (!IS_ERR(data->clk_master))
+ clk_disable(data->clk_master);
+}
+
+static bool __sysmmu_disable(struct sysmmu_drvdata *data)
+{
+ bool disabled;
unsigned long flags;
- bool disabled = false;
- int i;
- write_lock_irqsave(&data->lock, flags);
+ spin_lock_irqsave(&data->lock, flags);
- if (!set_sysmmu_inactive(data))
- goto finish;
+ disabled = set_sysmmu_inactive(data);
- for (i = 0; i < data->nsfrs; i++)
- __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
+ if (disabled) {
+ data->pgtable = 0;
+ data->domain = NULL;
- if (data->clk[1])
- clk_disable(data->clk[1]);
- if (data->clk[0])
- clk_disable(data->clk[0]);
+ __sysmmu_disable_nocount(data);
- disabled = true;
- data->pgtable = 0;
- data->domain = NULL;
-finish:
- write_unlock_irqrestore(&data->lock, flags);
+ dev_dbg(data->sysmmu, "Disabled\n");
+ } else {
+ dev_dbg(data->sysmmu, "%d times left to disable\n",
+ data->activations);
+ }
- if (disabled)
- dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
- else
- dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
- data->dbgname, data->activations);
+ spin_unlock_irqrestore(&data->lock, flags);
return disabled;
}
-/* __exynos_sysmmu_enable: Enables System MMU
- *
- * returns -error if an error occurred and System MMU is not enabled,
- * 0 if the System MMU has been just enabled and 1 if System MMU was already
- * enabled before.
- */
-static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
- unsigned long pgtable, struct iommu_domain *domain)
+static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
- int i, ret = 0;
- unsigned long flags;
+ unsigned int cfg = CFG_LRU | CFG_QOS(15);
+ unsigned int ver;
+
+ ver = __raw_sysmmu_version(data);
+ if (MMU_MAJ_VER(ver) == 3) {
+ if (MMU_MIN_VER(ver) >= 2) {
+ cfg |= CFG_FLPDCACHE;
+ if (MMU_MIN_VER(ver) == 3) {
+ cfg |= CFG_ACGEN;
+ cfg &= ~CFG_LRU;
+ } else {
+ cfg |= CFG_SYSSEL;
+ }
+ }
+ }
- write_lock_irqsave(&data->lock, flags);
+ __raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
+}
- if (!set_sysmmu_active(data)) {
- if (WARN_ON(pgtable != data->pgtable)) {
- ret = -EBUSY;
- set_sysmmu_inactive(data);
- } else {
- ret = 1;
- }
+static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
+{
+ if (!IS_ERR(data->clk_master))
+ clk_enable(data->clk_master);
+ clk_enable(data->clk);
- dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
- goto finish;
- }
+ __raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
- if (data->clk[0])
- clk_enable(data->clk[0]);
- if (data->clk[1])
- clk_enable(data->clk[1]);
+ __sysmmu_init_config(data);
- data->pgtable = pgtable;
+ __sysmmu_set_ptbase(data->sfrbase, data->pgtable);
- for (i = 0; i < data->nsfrs; i++) {
- __sysmmu_set_ptbase(data->sfrbases[i], pgtable);
+ __raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
- if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
- /* System MMU version is 3.x */
- __raw_writel((1 << 12) | (2 << 28),
- data->sfrbases[i] + REG_MMU_CFG);
- __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
- __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
- }
+ if (!IS_ERR(data->clk_master))
+ clk_disable(data->clk_master);
+}
- __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
+static int __sysmmu_enable(struct sysmmu_drvdata *data,
+ phys_addr_t pgtable, struct iommu_domain *domain)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+ if (set_sysmmu_active(data)) {
+ data->pgtable = pgtable;
+ data->domain = domain;
+
+ __sysmmu_enable_nocount(data);
+
+ dev_dbg(data->sysmmu, "Enabled\n");
+ } else {
+ ret = (pgtable == data->pgtable) ? 1 : -EBUSY;
+
+ dev_dbg(data->sysmmu, "already enabled\n");
}
- data->domain = domain;
+ if (WARN_ON(ret < 0))
+ set_sysmmu_inactive(data); /* decrement count */
- dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
-finish:
- write_unlock_irqrestore(&data->lock, flags);
+ spin_unlock_irqrestore(&data->lock, flags);
return ret;
}
-int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
+/* __exynos_sysmmu_enable: Enables System MMU
+ *
+ * returns -error if an error occurred and System MMU is not enabled,
+ * 0 if the System MMU has been just enabled and 1 if System MMU was already
+ * enabled before.
+ */
+static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
+ struct iommu_domain *domain)
{
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
- int ret;
+ int ret = 0;
+ unsigned long flags;
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct sysmmu_drvdata *data;
- BUG_ON(!memblock_is_memory(pgtable));
+ BUG_ON(!has_sysmmu(dev));
- ret = pm_runtime_get_sync(data->sysmmu);
- if (ret < 0) {
- dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
- return ret;
- }
+ spin_lock_irqsave(&owner->lock, flags);
- ret = __exynos_sysmmu_enable(data, pgtable, NULL);
- if (WARN_ON(ret < 0)) {
- pm_runtime_put(data->sysmmu);
- dev_err(data->sysmmu,
- "(%s) Already enabled with page table %#lx\n",
- data->dbgname, data->pgtable);
- } else {
- data->dev = dev;
- }
+ data = dev_get_drvdata(owner->sysmmu);
+
+ ret = __sysmmu_enable(data, pgtable, domain);
+ if (ret >= 0)
+ data->master = dev;
+
+ spin_unlock_irqrestore(&owner->lock, flags);
return ret;
}
+int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
+{
+ BUG_ON(!memblock_is_memory(pgtable));
+
+ return __exynos_sysmmu_enable(dev, pgtable, NULL);
+}
+
static bool exynos_sysmmu_disable(struct device *dev)
{
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
- bool disabled;
+ unsigned long flags;
+ bool disabled = true;
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct sysmmu_drvdata *data;
+
+ BUG_ON(!has_sysmmu(dev));
+
+ spin_lock_irqsave(&owner->lock, flags);
- disabled = __exynos_sysmmu_disable(data);
- pm_runtime_put(data->sysmmu);
+ data = dev_get_drvdata(owner->sysmmu);
+
+ disabled = __sysmmu_disable(data);
+ if (disabled)
+ data->master = NULL;
+
+ spin_unlock_irqrestore(&owner->lock, flags);
return disabled;
}
-static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
+static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
+ sysmmu_iova_t iova)
+{
+ if (__raw_sysmmu_version(data) == MAKE_MMU_VER(3, 3))
+ __raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
+}
+
+static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
+ sysmmu_iova_t iova)
+{
+ unsigned long flags;
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu);
+
+ if (!IS_ERR(data->clk_master))
+ clk_enable(data->clk_master);
+
+ spin_lock_irqsave(&data->lock, flags);
+ if (is_sysmmu_active(data))
+ __sysmmu_tlb_invalidate_flpdcache(data, iova);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ if (!IS_ERR(data->clk_master))
+ clk_disable(data->clk_master);
+}
+
+static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
+ size_t size)
{
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
unsigned long flags;
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct sysmmu_drvdata *data;
- read_lock_irqsave(&data->lock, flags);
+ data = dev_get_drvdata(owner->sysmmu);
+ spin_lock_irqsave(&data->lock, flags);
if (is_sysmmu_active(data)) {
- int i;
- for (i = 0; i < data->nsfrs; i++) {
- if (sysmmu_block(data->sfrbases[i])) {
- __sysmmu_tlb_invalidate_entry(
- data->sfrbases[i], iova);
- sysmmu_unblock(data->sfrbases[i]);
- }
+ unsigned int num_inv = 1;
+
+ if (!IS_ERR(data->clk_master))
+ clk_enable(data->clk_master);
+
+ /*
+ * L2TLB invalidation required
+ * 4KB page: 1 invalidation
+ * 64KB page: 16 invalidation
+ * 1MB page: 64 invalidation
+ * because it is set-associative TLB
+ * with 8-way and 64 sets.
+ * 1MB page can be cached in one of all sets.
+ * 64KB page can be one of 16 consecutive sets.
+ */
+ if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2)
+ num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
+
+ if (sysmmu_block(data->sfrbase)) {
+ __sysmmu_tlb_invalidate_entry(
+ data->sfrbase, iova, num_inv);
+ sysmmu_unblock(data->sfrbase);
}
+ if (!IS_ERR(data->clk_master))
+ clk_disable(data->clk_master);
} else {
- dev_dbg(data->sysmmu,
- "(%s) Disabled. Skipping invalidating TLB.\n",
- data->dbgname);
+ dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
+ iova);
}
-
- read_unlock_irqrestore(&data->lock, flags);
+ spin_unlock_irqrestore(&data->lock, flags);
}
void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
unsigned long flags;
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct sysmmu_drvdata *data;
- read_lock_irqsave(&data->lock, flags);
+ data = dev_get_drvdata(owner->sysmmu);
+ spin_lock_irqsave(&data->lock, flags);
if (is_sysmmu_active(data)) {
- int i;
- for (i = 0; i < data->nsfrs; i++) {
- if (sysmmu_block(data->sfrbases[i])) {
- __sysmmu_tlb_invalidate(data->sfrbases[i]);
- sysmmu_unblock(data->sfrbases[i]);
- }
+ if (!IS_ERR(data->clk_master))
+ clk_enable(data->clk_master);
+ if (sysmmu_block(data->sfrbase)) {
+ __sysmmu_tlb_invalidate(data->sfrbase);
+ sysmmu_unblock(data->sfrbase);
}
+ if (!IS_ERR(data->clk_master))
+ clk_disable(data->clk_master);
} else {
- dev_dbg(data->sysmmu,
- "(%s) Disabled. Skipping invalidating TLB.\n",
- data->dbgname);
+ dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
}
-
- read_unlock_irqrestore(&data->lock, flags);
+ spin_unlock_irqrestore(&data->lock, flags);
}
-static int exynos_sysmmu_probe(struct platform_device *pdev)
+static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
- int i, ret;
- struct device *dev;
+ int irq, ret;
+ struct device *dev = &pdev->dev;
struct sysmmu_drvdata *data;
+ struct resource *res;
- dev = &pdev->dev;
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_dbg(dev, "Not enough memory\n");
- ret = -ENOMEM;
- goto err_alloc;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->sfrbase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->sfrbase))
+ return PTR_ERR(data->sfrbase);
- ret = dev_set_drvdata(dev, data);
- if (ret) {
- dev_dbg(dev, "Unabled to initialize driver data\n");
- goto err_init;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Unable to find IRQ resource\n");
+ return irq;
}
- data->nsfrs = pdev->num_resources / 2;
- data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
- GFP_KERNEL);
- if (data->sfrbases == NULL) {
- dev_dbg(dev, "Not enough memory\n");
- ret = -ENOMEM;
- goto err_init;
- }
-
- for (i = 0; i < data->nsfrs; i++) {
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- dev_dbg(dev, "Unable to find IOMEM region\n");
- ret = -ENOENT;
- goto err_res;
- }
-
- data->sfrbases[i] = ioremap(res->start, resource_size(res));
- if (!data->sfrbases[i]) {
- dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
- res->start);
- ret = -ENOENT;
- goto err_res;
- }
+ ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
+ dev_name(dev), data);
+ if (ret) {
+ dev_err(dev, "Unabled to register handler of irq %d\n", irq);
+ return ret;
}
- for (i = 0; i < data->nsfrs; i++) {
- ret = platform_get_irq(pdev, i);
- if (ret <= 0) {
- dev_dbg(dev, "Unable to find IRQ resource\n");
- goto err_irq;
- }
-
- ret = request_irq(ret, exynos_sysmmu_irq, 0,
- dev_name(dev), data);
+ data->clk = devm_clk_get(dev, "sysmmu");
+ if (IS_ERR(data->clk)) {
+ dev_err(dev, "Failed to get clock!\n");
+ return PTR_ERR(data->clk);
+ } else {
+ ret = clk_prepare(data->clk);
if (ret) {
- dev_dbg(dev, "Unabled to register interrupt handler\n");
- goto err_irq;
+ dev_err(dev, "Failed to prepare clk\n");
+ return ret;
}
}
- if (dev_get_platdata(dev)) {
- char *deli, *beg;
- struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
-
- beg = platdata->clockname;
-
- for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
- /* NOTHING */;
-
- if (*deli == '\0')
- deli = NULL;
- else
- *deli = '\0';
-
- data->clk[0] = clk_get(dev, beg);
- if (IS_ERR(data->clk[0])) {
- data->clk[0] = NULL;
- dev_dbg(dev, "No clock descriptor registered\n");
- }
-
- if (data->clk[0] && deli) {
- *deli = ',';
- data->clk[1] = clk_get(dev, deli + 1);
- if (IS_ERR(data->clk[1]))
- data->clk[1] = NULL;
+ data->clk_master = devm_clk_get(dev, "master");
+ if (!IS_ERR(data->clk_master)) {
+ ret = clk_prepare(data->clk_master);
+ if (ret) {
+ clk_unprepare(data->clk);
+ dev_err(dev, "Failed to prepare master's clk\n");
+ return ret;
}
-
- data->dbgname = platdata->dbgname;
}
data->sysmmu = dev;
- rwlock_init(&data->lock);
- INIT_LIST_HEAD(&data->node);
+ spin_lock_init(&data->lock);
- __set_fault_handler(data, &default_fault_handler);
+ platform_set_drvdata(pdev, data);
- if (dev->parent)
- pm_runtime_enable(dev);
+ pm_runtime_enable(dev);
- dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
return 0;
-err_irq:
- while (i-- > 0) {
- int irq;
-
- irq = platform_get_irq(pdev, i);
- free_irq(irq, data);
- }
-err_res:
- while (data->nsfrs-- > 0)
- iounmap(data->sfrbases[data->nsfrs]);
- kfree(data->sfrbases);
-err_init:
- kfree(data);
-err_alloc:
- dev_err(dev, "Failed to initialize\n");
- return ret;
}
-static struct platform_driver exynos_sysmmu_driver = {
- .probe = exynos_sysmmu_probe,
- .driver = {
+static const struct of_device_id sysmmu_of_match[] __initconst = {
+ { .compatible = "samsung,exynos-sysmmu", },
+ { },
+};
+
+static struct platform_driver exynos_sysmmu_driver __refdata = {
+ .probe = exynos_sysmmu_probe,
+ .driver = {
.owner = THIS_MODULE,
.name = "exynos-sysmmu",
+ .of_match_table = sysmmu_of_match,
}
};
@@ -667,21 +700,32 @@ static inline void pgtable_flush(void *vastart, void *vaend)
static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
struct exynos_iommu_domain *priv;
+ int i;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->pgtable = (unsigned long *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO, 2);
+ priv->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
if (!priv->pgtable)
goto err_pgtable;
- priv->lv2entcnt = (short *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO, 1);
+ priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (!priv->lv2entcnt)
goto err_counter;
+ /* w/a of System MMU v3.3 to prevent caching 1MiB mapping */
+ for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
+ priv->pgtable[i + 0] = ZERO_LV2LINK;
+ priv->pgtable[i + 1] = ZERO_LV2LINK;
+ priv->pgtable[i + 2] = ZERO_LV2LINK;
+ priv->pgtable[i + 3] = ZERO_LV2LINK;
+ priv->pgtable[i + 4] = ZERO_LV2LINK;
+ priv->pgtable[i + 5] = ZERO_LV2LINK;
+ priv->pgtable[i + 6] = ZERO_LV2LINK;
+ priv->pgtable[i + 7] = ZERO_LV2LINK;
+ }
+
pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
spin_lock_init(&priv->lock);
@@ -705,7 +749,7 @@ err_pgtable:
static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
struct exynos_iommu_domain *priv = domain->priv;
- struct sysmmu_drvdata *data;
+ struct exynos_iommu_owner *owner;
unsigned long flags;
int i;
@@ -713,16 +757,20 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(data, &priv->clients, node) {
- while (!exynos_sysmmu_disable(data->dev))
+ list_for_each_entry(owner, &priv->clients, client) {
+ while (!exynos_sysmmu_disable(owner->dev))
; /* until System MMU is actually disabled */
}
+ while (!list_empty(&priv->clients))
+ list_del_init(priv->clients.next);
+
spin_unlock_irqrestore(&priv->lock, flags);
for (i = 0; i < NUM_LV1ENTRIES; i++)
if (lv1ent_page(priv->pgtable + i))
- kfree(__va(lv2table_base(priv->pgtable + i)));
+ kmem_cache_free(lv2table_kmem_cache,
+ phys_to_virt(lv2table_base(priv->pgtable + i)));
free_pages((unsigned long)priv->pgtable, 2);
free_pages((unsigned long)priv->lv2entcnt, 1);
@@ -733,114 +781,134 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
static int exynos_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct exynos_iommu_domain *priv = domain->priv;
+ phys_addr_t pagetable = virt_to_phys(priv->pgtable);
unsigned long flags;
int ret;
- ret = pm_runtime_get_sync(data->sysmmu);
- if (ret < 0)
- return ret;
-
- ret = 0;
-
spin_lock_irqsave(&priv->lock, flags);
- ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
-
+ ret = __exynos_sysmmu_enable(dev, pagetable, domain);
if (ret == 0) {
- /* 'data->node' must not be appeared in priv->clients */
- BUG_ON(!list_empty(&data->node));
- data->dev = dev;
- list_add_tail(&data->node, &priv->clients);
+ list_add_tail(&owner->client, &priv->clients);
+ owner->domain = domain;
}
spin_unlock_irqrestore(&priv->lock, flags);
if (ret < 0) {
- dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
- __func__, __pa(priv->pgtable));
- pm_runtime_put(data->sysmmu);
- } else if (ret > 0) {
- dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
- __func__, __pa(priv->pgtable));
- } else {
- dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
- __func__, __pa(priv->pgtable));
+ dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
+ __func__, &pagetable);
+ return ret;
}
+ dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
+ __func__, &pagetable, (ret == 0) ? "" : ", again");
+
return ret;
}
static void exynos_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct exynos_iommu_owner *owner;
struct exynos_iommu_domain *priv = domain->priv;
- struct list_head *pos;
+ phys_addr_t pagetable = virt_to_phys(priv->pgtable);
unsigned long flags;
- bool found = false;
spin_lock_irqsave(&priv->lock, flags);
- list_for_each(pos, &priv->clients) {
- if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
- found = true;
+ list_for_each_entry(owner, &priv->clients, client) {
+ if (owner == dev->archdata.iommu) {
+ if (exynos_sysmmu_disable(dev)) {
+ list_del_init(&owner->client);
+ owner->domain = NULL;
+ }
break;
}
}
- if (!found)
- goto finish;
-
- if (__exynos_sysmmu_disable(data)) {
- dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
- __func__, __pa(priv->pgtable));
- list_del_init(&data->node);
-
- } else {
- dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
- __func__, __pa(priv->pgtable));
- }
-
-finish:
spin_unlock_irqrestore(&priv->lock, flags);
- if (found)
- pm_runtime_put(data->sysmmu);
+ if (owner == dev->archdata.iommu)
+ dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
+ __func__, &pagetable);
+ else
+ dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}
-static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
- short *pgcounter)
+static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
+ sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
+ if (lv1ent_section(sent)) {
+ WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
+ return ERR_PTR(-EADDRINUSE);
+ }
+
if (lv1ent_fault(sent)) {
- unsigned long *pent;
+ sysmmu_pte_t *pent;
+ bool need_flush_flpd_cache = lv1ent_zero(sent);
- pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
- BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
+ pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
+ BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
if (!pent)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- *sent = mk_lv1ent_page(__pa(pent));
+ *sent = mk_lv1ent_page(virt_to_phys(pent));
*pgcounter = NUM_LV2ENTRIES;
pgtable_flush(pent, pent + NUM_LV2ENTRIES);
pgtable_flush(sent, sent + 1);
+
+ /*
+ * If pretched SLPD is a fault SLPD in zero_l2_table, FLPD cache
+ * may caches the address of zero_l2_table. This function
+ * replaces the zero_l2_table with new L2 page table to write
+ * valid mappings.
+ * Accessing the valid area may cause page fault since FLPD
+ * cache may still caches zero_l2_table for the valid area
+ * instead of new L2 page table that have the mapping
+ * information of the valid area
+ * Thus any replacement of zero_l2_table with other valid L2
+ * page table must involve FLPD cache invalidation for System
+ * MMU v3.3.
+ * FLPD cache invalidation is performed with TLB invalidation
+ * by VPN without blocking. It is safe to invalidate TLB without
+ * blocking because the target address of TLB invalidation is
+ * not currently mapped.
+ */
+ if (need_flush_flpd_cache) {
+ struct exynos_iommu_owner *owner;
+
+ spin_lock(&priv->lock);
+ list_for_each_entry(owner, &priv->clients, client)
+ sysmmu_tlb_invalidate_flpdcache(
+ owner->dev, iova);
+ spin_unlock(&priv->lock);
+ }
}
return page_entry(sent, iova);
}
-static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
+static int lv1set_section(struct exynos_iommu_domain *priv,
+ sysmmu_pte_t *sent, sysmmu_iova_t iova,
+ phys_addr_t paddr, short *pgcnt)
{
- if (lv1ent_section(sent))
+ if (lv1ent_section(sent)) {
+ WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
+ iova);
return -EADDRINUSE;
+ }
if (lv1ent_page(sent)) {
- if (*pgcnt != NUM_LV2ENTRIES)
+ if (*pgcnt != NUM_LV2ENTRIES) {
+ WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
+ iova);
return -EADDRINUSE;
+ }
- kfree(page_entry(sent, 0));
-
+ kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
*pgcnt = 0;
}
@@ -848,14 +916,26 @@ static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
pgtable_flush(sent, sent + 1);
+ spin_lock(&priv->lock);
+ if (lv1ent_page_zero(sent)) {
+ struct exynos_iommu_owner *owner;
+ /*
+ * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
+ * entry by speculative prefetch of SLPD which has no mapping.
+ */
+ list_for_each_entry(owner, &priv->clients, client)
+ sysmmu_tlb_invalidate_flpdcache(owner->dev, iova);
+ }
+ spin_unlock(&priv->lock);
+
return 0;
}
-static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
+static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
short *pgcnt)
{
if (size == SPAGE_SIZE) {
- if (!lv2ent_fault(pent))
+ if (WARN_ON(!lv2ent_fault(pent)))
return -EADDRINUSE;
*pent = mk_lv2ent_spage(paddr);
@@ -863,9 +943,11 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
*pgcnt -= 1;
} else { /* size == LPAGE_SIZE */
int i;
+
for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
- if (!lv2ent_fault(pent)) {
- memset(pent, 0, sizeof(*pent) * i);
+ if (WARN_ON(!lv2ent_fault(pent))) {
+ if (i > 0)
+ memset(pent - i, 0, sizeof(*pent) * i);
return -EADDRINUSE;
}
@@ -878,11 +960,38 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
return 0;
}
-static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
+/*
+ * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
+ *
+ * System MMU v3.x have an advanced logic to improve address translation
+ * performance with caching more page table entries by a page table walk.
+ * However, the logic has a bug that caching fault page table entries and System
+ * MMU reports page fault if the cached fault entry is hit even though the fault
+ * entry is updated to a valid entry after the entry is cached.
+ * To prevent caching fault page table entries which may be updated to valid
+ * entries later, the virtual memory manager should care about the w/a about the
+ * problem. The followings describe w/a.
+ *
+ * Any two consecutive I/O virtual address regions must have a hole of 128KiB
+ * in maximum to prevent misbehavior of System MMU 3.x. (w/a of h/w bug)
+ *
+ * Precisely, any start address of I/O virtual region must be aligned by
+ * the following sizes for System MMU v3.1 and v3.2.
+ * System MMU v3.1: 128KiB
+ * System MMU v3.2: 256KiB
+ *
+ * Because System MMU v3.3 caches page table entries more aggressively, it needs
+ * more w/a.
+ * - Any two consecutive I/O virtual regions must be have a hole of larger size
+ * than or equal size to 128KiB.
+ * - Start address of an I/O virtual region must be aligned by 128KiB.
+ */
+static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
phys_addr_t paddr, size_t size, int prot)
{
struct exynos_iommu_domain *priv = domain->priv;
- unsigned long *entry;
+ sysmmu_pte_t *entry;
+ sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
unsigned long flags;
int ret = -ENOMEM;
@@ -893,38 +1002,52 @@ static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
entry = section_entry(priv->pgtable, iova);
if (size == SECT_SIZE) {
- ret = lv1set_section(entry, paddr,
+ ret = lv1set_section(priv, entry, iova, paddr,
&priv->lv2entcnt[lv1ent_offset(iova)]);
} else {
- unsigned long *pent;
+ sysmmu_pte_t *pent;
- pent = alloc_lv2entry(entry, iova,
+ pent = alloc_lv2entry(priv, entry, iova,
&priv->lv2entcnt[lv1ent_offset(iova)]);
- if (!pent)
- ret = -ENOMEM;
+ if (IS_ERR(pent))
+ ret = PTR_ERR(pent);
else
ret = lv2set_page(pent, paddr, size,
&priv->lv2entcnt[lv1ent_offset(iova)]);
}
- if (ret) {
- pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
- __func__, iova, size);
- }
+ if (ret)
+ pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
+ __func__, ret, size, iova);
spin_unlock_irqrestore(&priv->pgtablelock, flags);
return ret;
}
+static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
+ sysmmu_iova_t iova, size_t size)
+{
+ struct exynos_iommu_owner *owner;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_for_each_entry(owner, &priv->clients, client)
+ sysmmu_tlb_invalidate_entry(owner->dev, iova, size);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+ unsigned long l_iova, size_t size)
{
struct exynos_iommu_domain *priv = domain->priv;
- struct sysmmu_drvdata *data;
+ sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
+ sysmmu_pte_t *ent;
+ size_t err_pgsize;
unsigned long flags;
- unsigned long *ent;
BUG_ON(priv->pgtable == NULL);
@@ -933,9 +1056,12 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
ent = section_entry(priv->pgtable, iova);
if (lv1ent_section(ent)) {
- BUG_ON(size < SECT_SIZE);
+ if (WARN_ON(size < SECT_SIZE)) {
+ err_pgsize = SECT_SIZE;
+ goto err;
+ }
- *ent = 0;
+ *ent = ZERO_LV2LINK; /* w/a for h/w bug in Sysmem MMU v3.3 */
pgtable_flush(ent, ent + 1);
size = SECT_SIZE;
goto done;
@@ -959,34 +1085,42 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
if (lv2ent_small(ent)) {
*ent = 0;
size = SPAGE_SIZE;
+ pgtable_flush(ent, ent + 1);
priv->lv2entcnt[lv1ent_offset(iova)] += 1;
goto done;
}
/* lv1ent_large(ent) == true here */
- BUG_ON(size < LPAGE_SIZE);
+ if (WARN_ON(size < LPAGE_SIZE)) {
+ err_pgsize = LPAGE_SIZE;
+ goto err;
+ }
memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
+ pgtable_flush(ent, ent + SPAGES_PER_LPAGE);
size = LPAGE_SIZE;
priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
spin_unlock_irqrestore(&priv->pgtablelock, flags);
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(data, &priv->clients, node)
- sysmmu_tlb_invalidate_entry(data->dev, iova);
- spin_unlock_irqrestore(&priv->lock, flags);
-
+ exynos_iommu_tlb_invalidate_entry(priv, iova, size);
return size;
+err:
+ spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+ pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
+ __func__, size, iova, err_pgsize);
+
+ return 0;
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct exynos_iommu_domain *priv = domain->priv;
- unsigned long *entry;
+ sysmmu_pte_t *entry;
unsigned long flags;
phys_addr_t phys = 0;
@@ -1010,14 +1144,42 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
return phys;
}
+static int exynos_iommu_add_device(struct device *dev)
+{
+ struct iommu_group *group;
+ int ret;
+
+ group = iommu_group_get(dev);
+
+ if (!group) {
+ group = iommu_group_alloc();
+ if (IS_ERR(group)) {
+ dev_err(dev, "Failed to allocate IOMMU group\n");
+ return PTR_ERR(group);
+ }
+ }
+
+ ret = iommu_group_add_device(group, dev);
+ iommu_group_put(group);
+
+ return ret;
+}
+
+static void exynos_iommu_remove_device(struct device *dev)
+{
+ iommu_group_remove_device(dev);
+}
+
static struct iommu_ops exynos_iommu_ops = {
- .domain_init = &exynos_iommu_domain_init,
- .domain_destroy = &exynos_iommu_domain_destroy,
- .attach_dev = &exynos_iommu_attach_device,
- .detach_dev = &exynos_iommu_detach_device,
- .map = &exynos_iommu_map,
- .unmap = &exynos_iommu_unmap,
- .iova_to_phys = &exynos_iommu_iova_to_phys,
+ .domain_init = exynos_iommu_domain_init,
+ .domain_destroy = exynos_iommu_domain_destroy,
+ .attach_dev = exynos_iommu_attach_device,
+ .detach_dev = exynos_iommu_detach_device,
+ .map = exynos_iommu_map,
+ .unmap = exynos_iommu_unmap,
+ .iova_to_phys = exynos_iommu_iova_to_phys,
+ .add_device = exynos_iommu_add_device,
+ .remove_device = exynos_iommu_remove_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
@@ -1025,11 +1187,41 @@ static int __init exynos_iommu_init(void)
{
int ret;
+ lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
+ LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
+ if (!lv2table_kmem_cache) {
+ pr_err("%s: Failed to create kmem cache\n", __func__);
+ return -ENOMEM;
+ }
+
ret = platform_driver_register(&exynos_sysmmu_driver);
+ if (ret) {
+ pr_err("%s: Failed to register driver\n", __func__);
+ goto err_reg_driver;
+ }
- if (ret == 0)
- bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
+ zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
+ if (zero_lv2_table == NULL) {
+ pr_err("%s: Failed to allocate zero level2 page table\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_zero_lv2;
+ }
+ ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
+ if (ret) {
+ pr_err("%s: Failed to register exynos-iommu driver.\n",
+ __func__);
+ goto err_set_iommu;
+ }
+
+ return 0;
+err_set_iommu:
+ kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
+err_zero_lv2:
+ platform_driver_unregister(&exynos_sysmmu_driver);
+err_reg_driver:
+ kmem_cache_destroy(lv2table_kmem_cache);
return ret;
}
subsys_initcall(exynos_iommu_init);
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index cba0498..b99dd88 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -592,8 +592,7 @@ found_cpu_node:
/* advance to next node in cache hierarchy */
node = of_find_node_by_phandle(*prop);
if (!node) {
- pr_debug("Invalid node for cache hierarchy %s\n",
- node->full_name);
+ pr_debug("Invalid node for cache hierarchy\n");
return ~(u32)0;
}
}
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
index 08ba497..61def7cb 100644
--- a/drivers/iommu/msm_iommu_dev.c
+++ b/drivers/iommu/msm_iommu_dev.c
@@ -127,13 +127,12 @@ static void msm_iommu_reset(void __iomem *base, int ncb)
static int msm_iommu_probe(struct platform_device *pdev)
{
- struct resource *r, *r2;
+ struct resource *r;
struct clk *iommu_clk;
struct clk *iommu_pclk;
struct msm_iommu_drvdata *drvdata;
struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
void __iomem *regs_base;
- resource_size_t len;
int ret, irq, par;
if (pdev->id == -1) {
@@ -178,35 +177,16 @@ static int msm_iommu_probe(struct platform_device *pdev)
iommu_clk = NULL;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
-
- if (!r) {
- ret = -ENODEV;
- goto fail_clk;
- }
-
- len = resource_size(r);
-
- r2 = request_mem_region(r->start, len, r->name);
- if (!r2) {
- pr_err("Could not request memory region: start=%p, len=%d\n",
- (void *) r->start, len);
- ret = -EBUSY;
+ regs_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(regs_base)) {
+ ret = PTR_ERR(regs_base);
goto fail_clk;
}
- regs_base = ioremap(r2->start, len);
-
- if (!regs_base) {
- pr_err("Could not ioremap: start=%p, len=%d\n",
- (void *) r2->start, len);
- ret = -EBUSY;
- goto fail_mem;
- }
-
irq = platform_get_irq_byname(pdev, "secure_irq");
if (irq < 0) {
ret = -ENODEV;
- goto fail_io;
+ goto fail_clk;
}
msm_iommu_reset(regs_base, iommu_dev->ncb);
@@ -222,14 +202,14 @@ static int msm_iommu_probe(struct platform_device *pdev)
if (!par) {
pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
ret = -ENODEV;
- goto fail_io;
+ goto fail_clk;
}
ret = request_irq(irq, msm_iommu_fault_handler, 0,
"msm_iommu_secure_irpt_handler", drvdata);
if (ret) {
pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
- goto fail_io;
+ goto fail_clk;
}
@@ -250,10 +230,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
clk_disable(iommu_pclk);
return 0;
-fail_io:
- iounmap(regs_base);
-fail_mem:
- release_mem_region(r->start, len);
fail_clk:
if (iommu_clk) {
clk_disable(iommu_clk);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 7fcbfc4..895af06 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -34,6 +34,9 @@
#include "omap-iopgtable.h"
#include "omap-iommu.h"
+#define to_iommu(dev) \
+ ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
+
#define for_each_iotlb_cr(obj, n, __i, cr) \
for (__i = 0; \
(__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
@@ -391,6 +394,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
__func__, start, da, bytes);
iotlb_load_cr(obj, &cr);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
+ break;
}
}
pm_runtime_put_sync(obj->dev);
@@ -1037,19 +1041,18 @@ static void iopte_cachep_ctor(void *iopte)
clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
-static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
- u32 flags)
+static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
memset(e, 0, sizeof(*e));
e->da = da;
e->pa = pa;
- e->valid = 1;
+ e->valid = MMU_CAM_V;
/* FIXME: add OMAP1 support */
- e->pgsz = flags & MMU_CAM_PGSZ_MASK;
- e->endian = flags & MMU_RAM_ENDIAN_MASK;
- e->elsz = flags & MMU_RAM_ELSZ_MASK;
- e->mixed = flags & MMU_RAM_MIXED_MASK;
+ e->pgsz = pgsz;
+ e->endian = MMU_RAM_ENDIAN_LITTLE;
+ e->elsz = MMU_RAM_ELSZ_8;
+ e->mixed = 0;
return iopgsz_to_bytes(e->pgsz);
}
@@ -1062,9 +1065,8 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
struct device *dev = oiommu->dev;
struct iotlb_entry e;
int omap_pgsz;
- u32 ret, flags;
+ u32 ret;
- /* we only support mapping a single iommu page for now */
omap_pgsz = bytes_to_iopgsz(bytes);
if (omap_pgsz < 0) {
dev_err(dev, "invalid size to map: %d\n", bytes);
@@ -1073,9 +1075,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
- flags = omap_pgsz | prot;
-
- iotlb_init_entry(&e, da, pa, flags);
+ iotlb_init_entry(&e, da, pa, omap_pgsz);
ret = omap_iopgtable_store_entry(oiommu, &e);
if (ret)
@@ -1248,12 +1248,6 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
return ret;
}
-static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
- unsigned long cap)
-{
- return 0;
-}
-
static int omap_iommu_add_device(struct device *dev)
{
struct omap_iommu_arch_data *arch_data;
@@ -1305,7 +1299,6 @@ static struct iommu_ops omap_iommu_ops = {
.map = omap_iommu_map,
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
- .domain_has_cap = omap_iommu_domain_has_cap,
.add_device = omap_iommu_add_device,
.remove_device = omap_iommu_remove_device,
.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
index b6f9a51..f891683 100644
--- a/drivers/iommu/omap-iopgtable.h
+++ b/drivers/iommu/omap-iopgtable.h
@@ -93,6 +93,3 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
/* to find an entry in the second-level page table. */
#define iopte_index(da) (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
#define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da))
-
-#define to_iommu(dev) \
- (platform_get_drvdata(to_platform_device(dev)))
diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c
index e3bc2e1..bd97ade 100644
--- a/drivers/iommu/shmobile-ipmmu.c
+++ b/drivers/iommu/shmobile-ipmmu.c
@@ -94,11 +94,6 @@ static int ipmmu_probe(struct platform_device *pdev)
struct resource *res;
struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "cannot get platform resources\n");
- return -ENOENT;
- }
ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL);
if (!ipmmu) {
dev_err(&pdev->dev, "cannot allocate device data\n");
@@ -106,19 +101,18 @@ static int ipmmu_probe(struct platform_device *pdev)
}
spin_lock_init(&ipmmu->flush_lock);
ipmmu->dev = &pdev->dev;
- ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!ipmmu->ipmmu_base) {
- dev_err(&pdev->dev, "ioremap_nocache failed\n");
- return -ENOMEM;
- }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ipmmu->ipmmu_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ipmmu->ipmmu_base))
+ return PTR_ERR(ipmmu->ipmmu_base);
+
ipmmu->dev_names = pdata->dev_names;
ipmmu->num_dev_names = pdata->num_dev_names;
platform_set_drvdata(pdev, ipmmu);
ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */
ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */
- ipmmu_iommu_init(ipmmu);
- return 0;
+ return ipmmu_iommu_init(ipmmu);
}
static struct platform_driver ipmmu_driver = {