Diffstat (limited to 'drivers/iommu/arm-smmu.c')
-rw-r--r--  drivers/iommu/arm-smmu.c  100
1 file changed, 84 insertions(+), 16 deletions(-)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 5a9a441..382a1d3 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -49,6 +49,7 @@
#include <linux/spinlock.h>
#include <linux/amba/bus.h>
+#include "../staging/fsl-mc/include/mc-bus.h"
#include "io-pgtable.h"
@@ -247,6 +248,7 @@ enum arm_smmu_s2cr_privcfg {
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
+#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
#define CB_PAR_F (1 << 0)
@@ -278,6 +280,9 @@ enum arm_smmu_s2cr_privcfg {
#define FSYNR0_WNR (1 << 4)
+#define MSI_IOVA_BASE 0x8000000
+#define MSI_IOVA_LENGTH 0x100000
+
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
@@ -401,6 +406,7 @@ enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
ARM_SMMU_DOMAIN_NESTED,
+ ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {
@@ -821,6 +827,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
+ smmu_domain->smmu = smmu;
+ goto out_unlock;
+ }
+
/*
* Mapping the requested stage onto what we support is surprisingly
* complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -981,7 +993,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
void __iomem *cb_base;
int irq;
- if (!smmu)
+ if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
return;
/*
@@ -1004,7 +1016,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+ if (type != IOMMU_DOMAIN_UNMANAGED &&
+ type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_IDENTITY)
return NULL;
/*
* Allocate the domain and initialise some of its data structures.
@@ -1202,10 +1216,15 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
{
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s2cr *s2cr = smmu->s2crs;
- enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
u8 cbndx = smmu_domain->cfg.cbndx;
+ enum arm_smmu_s2cr_type type;
int i, idx;
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
+ type = S2CR_TYPE_BYPASS;
+ else
+ type = S2CR_TYPE_TRANS;
+
for_each_cfg_sme(fwspec, i, idx) {
if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
continue;
@@ -1343,6 +1362,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY)
+ return iova;
+
if (!ops)
return 0;
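A minimal sketch, not part of the patch above, of what the new pass-through path means for callers: once a device is attached to an IOMMU_DOMAIN_IDENTITY domain, arm_smmu_iova_to_phys() returns the IOVA unchanged, so a 1:1 spot check like the hypothetical helper below should hold. iommu_get_domain_for_dev() and iommu_iova_to_phys() are the generic IOMMU core entry points; check_identity_translation() is an assumed name used for illustration only.

#include <linux/iommu.h>

/* Hypothetical helper, not from this patch: verify 1:1 translation. */
static bool check_identity_translation(struct device *dev, dma_addr_t iova)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	/* Only identity (bypass) domains are expected to map 1:1. */
	if (!domain || domain->type != IOMMU_DOMAIN_IDENTITY)
		return false;

	return iommu_iova_to_phys(domain, iova) == (phys_addr_t)iova;
}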
@@ -1368,8 +1390,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
* requests.
*/
return true;
- case IOMMU_CAP_INTR_REMAP:
- return true; /* MSIs are just memory writes */
case IOMMU_CAP_NOEXEC:
return true;
default:
@@ -1478,10 +1498,12 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
}
if (group)
- return group;
+ return iommu_group_ref_get(group);
if (dev_is_pci(dev))
group = pci_device_group(dev);
+ else if (dev_is_fsl_mc(dev))
+ group = fsl_mc_device_group(dev);
else
group = generic_device_group(dev);
@@ -1493,6 +1515,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
@@ -1508,6 +1533,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
mutex_lock(&smmu_domain->init_mutex);
switch (attr) {
@@ -1534,17 +1562,44 @@ out_unlock:
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
- u32 fwid = 0;
+ u32 mask, fwid = 0;
if (args->args_count > 0)
fwid |= (u16)args->args[0];
if (args->args_count > 1)
fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
+ else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
+ fwid |= (u16)mask << SMR_MASK_SHIFT;
return iommu_fwspec_add_ids(dev, &fwid, 1);
}
+static void arm_smmu_get_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *region;
+ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+ region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+ prot, IOMMU_RESV_SW_MSI);
+ if (!region)
+ return;
+
+ list_add_tail(&region->list, head);
+
+ iommu_dma_get_resv_regions(dev, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, list)
+ kfree(entry);
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1560,6 +1615,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
+ .get_resv_regions = arm_smmu_get_resv_regions,
+ .put_resv_regions = arm_smmu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
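For context, a hedged sketch of how the software MSI window registered above becomes visible to consumers such as VFIO or the DMA layer: the generic iommu_get_resv_regions() and iommu_put_resv_regions() helpers dispatch to the .get_resv_regions/.put_resv_regions callbacks added here, so walking the resulting list shows the IOMMU_RESV_SW_MSI region at MSI_IOVA_BASE. dump_resv_regions() below is an assumed debug helper, not part of the patch.

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/list.h>

/* Hypothetical debug helper, not from this patch: print a device's reserved regions. */
static void dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list)
		dev_info(dev, "reserved region %pa, size %zu, type %d\n",
			 &region->start, region->length, region->type);
	iommu_put_resv_regions(dev, &resv_regions);
}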
@@ -1581,16 +1638,22 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
for (i = 0; i < smmu->num_mapping_groups; ++i)
arm_smmu_write_sme(smmu, i);
- /*
- * Before clearing ARM_MMU500_ACTLR_CPRE, need to
- * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
- * bit is only present in MMU-500r2 onwards.
- */
- reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
- major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
- if ((smmu->model == ARM_MMU500) && (major >= 2)) {
+ if (smmu->model == ARM_MMU500) {
+ /*
+ * Before clearing ARM_MMU500_ACTLR_CPRE, need to
+ * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
+ * bit is only present in MMU-500r2 onwards.
+ */
+ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
+ major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
- reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+ if (major >= 2)
+ reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+ /*
+ * Allow unmatched Stream IDs to allocate bypass
+ * TLB entries for reduced latency.
+ */
+ reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
}
@@ -2024,6 +2087,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}
#endif
+#ifdef CONFIG_FSL_MC_BUS
+ if (!iommu_present(&fsl_mc_bus_type))
+ bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
+#endif
+
return 0;
}