summaryrefslogtreecommitdiff
path: root/drivers/iommu
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/iommu')
-rw-r--r--drivers/iommu/Kconfig12
-rw-r--r--drivers/iommu/fsl_pamu.c406
-rw-r--r--drivers/iommu/fsl_pamu.h9
-rw-r--r--drivers/iommu/fsl_pamu_domain.c376
-rw-r--r--drivers/iommu/fsl_pamu_domain.h14
-rw-r--r--drivers/iommu/iommu.c10
6 files changed, 662 insertions, 165 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c880eba..3466972 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -17,6 +17,18 @@ config OF_IOMMU
def_bool y
depends on OF
+config DUMMY_IOMMU
+ bool "Dummy IOMMU support for VFIO (UNSAFE)"
+ select IOMMU_API
+ help
+ WARNING: Dummy IOMMU driver, only for platforms that don't have a
+ hardware IOMMU. This option does allow you to use VFIO device
+ assignment, but does not provide DMA memory isolation.
+ Only use this if you trust all VFIO users completely.
+ The dummy vfio iommu driver can be enabled via
+ /sys/kernel/vfio_iommu_dummy/enable_iommu_dummy attribute.
+ If unsure, say N.
+
config FSL_PAMU
bool "Freescale IOMMU support"
depends on PPC_E500MC
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index cba0498..b33561e 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -32,6 +32,9 @@
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/fsl_guts.h>
+#include <asm/fsl_kibo.h>
+#include <asm/mpc85xx.h>
+#include <linux/syscore_ops.h>
#include "fsl_pamu.h"
@@ -43,10 +46,13 @@
#define make64(high, low) (((u64)(high) << 32) | (low))
-struct pamu_isr_data {
+struct pamu_info {
void __iomem *pamu_reg_base; /* Base address of PAMU regs*/
unsigned int count; /* The number of PAMUs */
-};
+} pamu_info_data;
+
+/* Pointer to the device configuration space */
+static struct ccsr_guts __iomem *guts_regs;
static struct paace *ppaact;
static struct paace *spaact;
@@ -81,6 +87,13 @@ static const struct of_device_id l3_device_ids[] = {
{}
};
+/* Table for matching FMAN rx port compatible */
+static const struct of_device_id fman_device_ids[] = {
+ { .compatible = "fsl,fman-port-10g-rx", },
+ { .compatible = "fsl,fman-port-1g-rx", },
+ {}
+};
+
/* maximum subwindows permitted per liodn */
static u32 max_subwindow_count;
@@ -115,6 +128,34 @@ static struct paace *pamu_get_ppaace(int liodn)
}
/**
+ * set_dcfg_liodn() - set the device LIODN in DCFG
+ * @np: device tree node pointer
+ * @liodn: liodn value to program
+ *
+ * Returns 0 upon success else error code < 0 returned
+ */
+static int set_dcfg_liodn(struct device_node *np, int liodn)
+{
+ const __be32 *prop;
+ u32 liodn_reg_offset;
+ int len;
+ void __iomem *dcfg_region = (void *)guts_regs;
+
+ if (!dcfg_region)
+ return -ENODEV;
+
+ prop = of_get_property(np, "fsl,liodn-reg", &len);
+ if (!prop || len != 8)
+ return -EINVAL;
+
+ liodn_reg_offset = be32_to_cpup(&prop[1]);
+
+ out_be32((u32 *)(dcfg_region + liodn_reg_offset), liodn);
+
+ return 0;
+}
+
+/**
* pamu_enable_liodn() - Set valid bit of PACCE
* @liodn: liodn PAACT index for desired PAACE
*
@@ -170,10 +211,10 @@ int pamu_disable_liodn(int liodn)
static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
{
/* Bug if not a power of 2 */
- BUG_ON(!is_power_of_2(addrspace_size));
+ BUG_ON((addrspace_size & (addrspace_size - 1)));
/* window size is 2^(WSE+1) bytes */
- return __ffs(addrspace_size) - 1;
+ return fls64(addrspace_size) - 1 - 1;
}
/* Derive the PAACE window count encoding for the subwindow count */
@@ -253,6 +294,42 @@ static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
return (spaace_addr - (unsigned long)spaact) / (sizeof(struct paace));
}
+/*
+ * Default PPAACE settings for an LIODN.
+ */
+static void setup_default_ppaace(struct paace *ppaace)
+{
+ pamu_init_ppaace(ppaace);
+ /* window size is 2^(WSE+1) bytes */
+ set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, 35);
+ ppaace->wbah = 0;
+ set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
+ set_bf(ppaace->impl_attr, PAACE_IA_ATM,
+ PAACE_ATM_NO_XLATE);
+ set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
+ PAACE_AP_PERMS_ALL);
+}
+
+/* Reset the PAACE entry to the default state */
+void enable_default_dma_window(int liodn)
+{
+ struct paace *ppaace;
+
+ ppaace = pamu_get_ppaace(liodn);
+ if (!ppaace) {
+ pr_debug("Invalid liodn entry\n");
+ return;
+ }
+
+ memset(ppaace, 0, sizeof(struct paace));
+
+ setup_default_ppaace(ppaace);
+
+ /* Ensure that all other stores to the ppaace complete first */
+ mb();
+ pamu_enable_liodn(liodn);
+}
+
/* Release the subwindows reserved for a particular LIODN */
void pamu_free_subwins(int liodn)
{
@@ -274,16 +351,16 @@ void pamu_free_subwins(int liodn)
}
/*
- * Function used for updating stash destination for the coressponding
+ * Function used for updating a specific PAACE field for the corresponding
* LIODN.
*/
-int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
+int pamu_update_paace_field(int liodn, u32 subwin, int field, u32 value)
{
struct paace *paace;
paace = pamu_get_ppaace(liodn);
if (!paace) {
- pr_debug("Invalid liodn entry\n");
+ pr_err("Invalid liodn entry\n");
return -ENOENT;
}
if (subwin) {
@@ -292,8 +369,19 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
return -ENOENT;
}
}
- set_bf(paace->impl_attr, PAACE_IA_CID, value);
+ switch (field) {
+ case PAACE_STASH_FIELD:
+ set_bf(paace->impl_attr, PAACE_IA_CID, value);
+ break;
+ case PAACE_OMI_FIELD:
+ set_bf(paace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
+ paace->op_encode.index_ot.omi = value;
+ break;
+ default:
+ pr_debug("Invalid field, can't update\n");
+ return -EINVAL;
+ }
mb();
return 0;
@@ -351,7 +439,7 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
struct paace *ppaace;
unsigned long fspi;
- if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) {
+ if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
pr_debug("window size too small or not a power of two %llx\n", win_size);
return -EINVAL;
}
@@ -464,7 +552,7 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
return -ENOENT;
}
- if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) {
+ if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
pr_debug("subwindow size out of range, or not a power of 2\n");
return -EINVAL;
}
@@ -523,6 +611,83 @@ void get_ome_index(u32 *omi_index, struct device *dev)
*omi_index = OMI_QMAN_PRIV;
}
+/*
+ * We get the stash id programmed by SDOS from the shared
+ * cluster L2 l2csr1 register.
+ */
+static u32 get_dsp_l2_stash_id(u32 vcpu)
+{
+ const u32 *prop;
+ struct device_node *node;
+ struct ccsr_cluster_l2 *l2cache_regs;
+ u32 stash_id;
+
+ for_each_compatible_node(node, NULL, "fsl,sc3900") {
+ prop = of_get_property(node, "reg", 0);
+ if (!prop) {
+ pr_err("missing reg property in dsp cpu node %s\n",
+ node->full_name);
+ of_node_put(node);
+ return ~(u32)0;
+ }
+
+ if (*prop != vcpu)
+ continue;
+
+ prop = of_get_property(node, "next-level-cache", 0);
+ if (!prop) {
+ pr_err("missing next level cache property in dsp cpu %s\n",
+ node->full_name);
+ of_node_put(node);
+ return ~(u32)0;
+ }
+ of_node_put(node);
+
+ node = of_find_node_by_phandle(*prop);
+ if (!node) {
+ pr_err("Invalid node for cache hierarchy %s\n",
+ node->full_name);
+ return ~(u32)0;
+ }
+
+ l2cache_regs = of_iomap(node, 0);
+ if (!l2cache_regs) {
+ pr_err("failed to map cluster l2 cache registers %s\n",
+ node->full_name);
+ of_node_put(node);
+ return ~(u32)0;
+ }
+
+ stash_id = in_be32(&l2cache_regs->l2csr1) &
+ CLUSTER_L2_STASH_MASK;
+ of_node_put(node);
+ iounmap(l2cache_regs);
+
+ return stash_id;
+ }
+ return ~(u32)0;
+}
+
+static bool has_erratum_a007907(void)
+{
+ u32 svr = mfspr(SPRN_SVR);
+
+ switch (SVR_SOC_VER(svr)) {
+ case SVR_B4860:
+ case SVR_B4420:
+ case SVR_T4240:
+ case SVR_T4160:
+ return SVR_REV(svr) <= 0x20;
+
+ case SVR_T2080:
+ case SVR_T2081:
+ return SVR_REV(svr) == 0x10;
+
+ default:
+ return false;
+ };
+}
+
/**
* get_stash_id - Returns stash destination id corresponding to a
* cache type and vcpu.
@@ -540,6 +705,15 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
int len, found = 0;
int i;
+ if (stash_dest_hint == PAMU_ATTR_CACHE_L1 &&
+ has_erratum_a007907())
+ stash_dest_hint = PAMU_ATTR_CACHE_L2;
+
+ /* check for DSP L2 cache */
+ if (stash_dest_hint == PAMU_ATTR_CACHE_DSP_L2) {
+ return get_dsp_l2_stash_id(vcpu);
+ }
+
/* Fastpath, exit early if L3/CPC cache is target for stashing */
if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
node = of_find_matching_node(NULL, l3_device_ids);
@@ -607,13 +781,16 @@ found_cpu_node:
#define QMAN_PAACE 1
#define QMAN_PORTAL_PAACE 2
#define BMAN_PAACE 3
+#define FMAN_PAACE 4
+#define PMAN_PAACE 5
/**
- * Setup operation mapping and stash destinations for QMAN and QMAN portal.
+ * Setup operation mapping and stash destinations for DPAA (QMAN, QMAN portal,
+ * FMAN, BMAN) and PMAN.
* Memory accesses to QMAN and BMAN private memory need not be coherent, so
* clear the PAACE entry coherency attribute for them.
*/
-static void setup_qbman_paace(struct paace *ppaace, int paace_type)
+static void setup_dpaa_paace(struct paace *ppaace, int paace_type)
{
switch (paace_type) {
case QMAN_PAACE:
@@ -634,6 +811,17 @@ static void setup_qbman_paace(struct paace *ppaace, int paace_type)
set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
0);
break;
+ case FMAN_PAACE:
+ set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
+ ppaace->op_encode.index_ot.omi = OMI_FMAN;
+ /* Set frame stashing for the L3 cache */
+ set_bf(ppaace->impl_attr, PAACE_IA_CID,
+ get_stash_id(PAMU_ATTR_CACHE_L3, 0));
+ break;
+ case PMAN_PAACE:
+ set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
+ ppaace->op_encode.index_ot.omi = OMI_PMAN;
+ break;
}
}
@@ -674,13 +862,25 @@ static void __init setup_omt(struct ome *omt)
ome = &omt[OMI_CAAM];
ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI;
ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
+
+ /* Configure OMI_PMAN */
+ ome = &omt[OMI_PMAN];
+ ome->moe[IOE_DIRECT0_IDX] = EOE_LDEC | EOE_VALID;
+ ome->moe[IOE_DIRECT1_IDX] = EOE_LDEC | EOE_VALID;
+
+ /* Configure OMI_DSP */
+ ome = &omt[OMI_DSP];
+ ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_RWNITC;
+ ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RWNITC;
+ ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WWSAO;
+ ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSAO;
}
/*
* Get the maximum number of PAACT table entries
* and subwindows supported by PAMU
*/
-static void get_pamu_cap_values(unsigned long pamu_reg_base)
+static void get_pamu_cap_values(void *pamu_reg_base)
{
u32 pc_val;
@@ -690,9 +890,8 @@ static void get_pamu_cap_values(unsigned long pamu_reg_base)
}
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
-int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
- phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
- phys_addr_t omt_phys)
+int setup_one_pamu(void *pamu_reg_base, phys_addr_t ppaact_phys,
+ phys_addr_t spaact_phys, phys_addr_t omt_phys)
{
u32 *pc;
struct pamu_mmap_regs *pamu_regs;
@@ -701,6 +900,16 @@ int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
pamu_regs = (struct pamu_mmap_regs *)
(pamu_reg_base + PAMU_MMAP_REGS_BASE);
+ /*
+ * As per PAMU errata A-005982, writing the PAACT and SPAACT
+ * base address registers wouldn't invalidate the corresponding
+ * caches if the OMT cache is disabled. The workaround is to
+ * enable the OMT cache before setting the base registers.
+ * This can be done without actually enabling PAMU.
+ */
+
+ out_be32(pc, PAMU_PC_OCE);
+
/* set up pointers to corenet control blocks */
out_be32(&pamu_regs->ppbah, upper_32_bits(ppaact_phys));
@@ -733,6 +942,28 @@ int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
return 0;
}
+/*
+ * Primarily to enable LIODNs which u-boot didn't update in the device tree.
+ */
+static void __init enable_remaining_liodns(void)
+{
+ int liodn;
+ struct paace *ppaace;
+
+ for (liodn = 0; liodn < PAACE_NUMBER_ENTRIES; liodn++) {
+ ppaace = pamu_get_ppaace(liodn);
+ if (!get_bf(ppaace->addr_bitfields, PAACE_AF_V)) {
+ setup_default_ppaace(ppaace);
+ /*
+ * Ensure that all stores to the ppaace
+ * complete first.
+ */
+ mb();
+ pamu_enable_liodn(liodn);
+ }
+ }
+}
+
/* Enable all device LIODNS */
static void __init setup_liodns(void)
{
@@ -752,30 +983,39 @@ static void __init setup_liodns(void)
continue;
}
ppaace = pamu_get_ppaace(liodn);
- pamu_init_ppaace(ppaace);
- /* window size is 2^(WSE+1) bytes */
- set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, 35);
- ppaace->wbah = 0;
- set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
- set_bf(ppaace->impl_attr, PAACE_IA_ATM,
- PAACE_ATM_NO_XLATE);
- set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
- PAACE_AP_PERMS_ALL);
+ setup_default_ppaace(ppaace);
+
if (of_device_is_compatible(node, "fsl,qman-portal"))
- setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
+ setup_dpaa_paace(ppaace, QMAN_PORTAL_PAACE);
if (of_device_is_compatible(node, "fsl,qman"))
- setup_qbman_paace(ppaace, QMAN_PAACE);
+ setup_dpaa_paace(ppaace, QMAN_PAACE);
if (of_device_is_compatible(node, "fsl,bman"))
- setup_qbman_paace(ppaace, BMAN_PAACE);
+ setup_dpaa_paace(ppaace, BMAN_PAACE);
+ if (of_device_is_compatible(node, "fsl,pman"))
+ setup_dpaa_paace(ppaace, PMAN_PAACE);
+#ifdef CONFIG_FSL_FMAN_CPC_STASH
+ if (of_match_node(fman_device_ids, node))
+ setup_dpaa_paace(ppaace, FMAN_PAACE);
+#endif
mb();
pamu_enable_liodn(liodn);
}
}
+
+ /*
+ * Currently u-boot doesn't fixup LIODNs for cases
+ * where a frame is passed to a hardware block from
+ * another hardware block. For example, a frame can
+ * be passed from an FMAN rx port to SEC or RMAN. So,
+ * as a workaround we enable all the possible LIODN
+ * values.
+ */
+ enable_remaining_liodns();
}
irqreturn_t pamu_av_isr(int irq, void *arg)
{
- struct pamu_isr_data *data = arg;
+ struct pamu_info *data = arg;
phys_addr_t phys;
unsigned int i, j, ret;
@@ -1020,11 +1260,9 @@ static const struct {
static int __init fsl_pamu_probe(struct platform_device *pdev)
{
void __iomem *pamu_regs = NULL;
- struct ccsr_guts __iomem *guts_regs = NULL;
u32 pamubypenr, pamu_counter;
+ void __iomem *pamu_reg_base;
unsigned long pamu_reg_off;
- unsigned long pamu_reg_base;
- struct pamu_isr_data *data = NULL;
struct device_node *guts_node;
u64 size;
struct page *p;
@@ -1050,23 +1288,17 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
}
of_get_address(pdev->dev.of_node, 0, &size, NULL);
+ pamu_info_data.pamu_reg_base = pamu_regs;
+ pamu_info_data.count = size / PAMU_OFFSET;
+
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (irq == NO_IRQ) {
dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n");
goto error;
}
- data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL);
- if (!data) {
- dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n");
- ret = -ENOMEM;
- goto error;
- }
- data->pamu_reg_base = pamu_regs;
- data->count = size / PAMU_OFFSET;
-
/* The ISR needs access to the regs, so we won't iounmap them */
- ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
+ ret = request_irq(irq, pamu_av_isr, 0, "pamu", &pamu_info_data);
if (ret < 0) {
dev_err(&pdev->dev, "error %i installing ISR for irq %i\n",
ret, irq);
@@ -1090,7 +1322,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
}
/* read in the PAMU capability registers */
- get_pamu_cap_values((unsigned long)pamu_regs);
+ get_pamu_cap_values(pamu_regs);
/*
* To simplify the allocation of a coherency domain, we allocate the
* PAACT and the OMT in the same memory buffer. Unfortunately, this
@@ -1169,9 +1401,9 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
- pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off;
- setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
- spaact_phys, omt_phys);
+ pamu_reg_base = pamu_regs + pamu_reg_off;
+ setup_one_pamu(pamu_reg_base, ppaact_phys, spaact_phys,
+ omt_phys);
/* Disable PAMU bypass for this PAMU */
pamubypenr &= ~pamu_counter;
}
@@ -1181,8 +1413,6 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
/* Enable all relevant PAMU(s) */
out_be32(&guts_regs->pamubypenr, pamubypenr);
- iounmap(guts_regs);
-
/* Enable DMA for the LIODNs in the device tree*/
setup_liodns();
@@ -1194,12 +1424,7 @@ error_genpool:
error:
if (irq != NO_IRQ)
- free_irq(irq, data);
-
- if (data) {
- memset(data, 0, sizeof(struct pamu_isr_data));
- kfree(data);
- }
+ free_irq(irq, &pamu_info_data);
if (pamu_regs)
iounmap(pamu_regs);
@@ -1233,6 +1458,77 @@ static struct platform_driver fsl_of_pamu_driver = {
.probe = fsl_pamu_probe,
};
+#ifdef CONFIG_SUSPEND
+static int iommu_suspend(void)
+{
+ int i;
+
+ for (i = 0; i < pamu_info_data.count; i++) {
+ u32 val;
+ void __iomem *p;
+
+ p = pamu_info_data.pamu_reg_base + i * PAMU_OFFSET;
+ val = in_be32((u32 *)(p + PAMU_PICS));
+ /* Disable access violation interrupts */
+ out_be32((u32 *)(p + PAMU_PICS),
+ val & ~PAMU_ACCESS_VIOLATION_ENABLE);
+ }
+
+ return 0;
+}
+
+static void restore_dcfg_liodns(void)
+{
+ struct device_node *node;
+ const __be32 *prop;
+ int ret, liodn;
+
+ for_each_node_with_property(node, "fsl,liodn-reg") {
+ prop = of_get_property(node, "fsl,liodn", 0);
+ if (!prop)
+ continue;
+ liodn = be32_to_cpup(prop);
+ ret = set_dcfg_liodn(node, liodn);
+ if (ret)
+ pr_debug("LIODN restore failed for %s\n",
+ node->full_name);
+ }
+}
+
+static void iommu_resume(void)
+{
+ int i;
+ u32 pamubypenr, pamu_counter;
+
+ restore_dcfg_liodns();
+ pamubypenr = in_be32(&guts_regs->pamubypenr);
+ for (i = 0, pamu_counter = 0x80000000; i < pamu_info_data.count;
+ i++, pamu_counter >>= 1) {
+ void __iomem *p;
+
+ p = pamu_info_data.pamu_reg_base + i * PAMU_OFFSET;
+ setup_one_pamu(p, virt_to_phys(ppaact), virt_to_phys(spaact),
+ virt_to_phys(omt));
+ pamubypenr &= ~pamu_counter;
+ }
+ /* Enable all PAMUs */
+ out_be32(&guts_regs->pamubypenr, pamubypenr);
+}
+
+static struct syscore_ops iommu_syscore_ops = {
+ .resume = iommu_resume,
+ .suspend = iommu_suspend,
+};
+
+static void __init init_iommu_pm_ops(void)
+{
+ register_syscore_ops(&iommu_syscore_ops);
+}
+
+#else
+static inline void init_iommu_pm_ops(void) {}
+#endif /* CONFIG_SUSPEND */
+
static __init int fsl_pamu_init(void)
{
struct platform_device *pdev = NULL;
@@ -1290,6 +1586,8 @@ static __init int fsl_pamu_init(void)
goto error_device_add;
}
+ init_iommu_pm_ops();
+
return 0;
error_device_add:
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
index 8fc1a12..89940fa 100644
--- a/drivers/iommu/fsl_pamu.h
+++ b/drivers/iommu/fsl_pamu.h
@@ -321,6 +321,12 @@ struct paace {
u32 reserved[8]; /* not currently implemented */
};
+enum paace_field {
+ PAACE_STASH_FIELD,
+ PAACE_OMI_FIELD,
+ PAACE_FIELD_MAX,
+};
+
/* OME : Operation mapping entry
* MOE : Mapped Operation Encodings
* The operation mapping table is table containing operation mapping entries (OME).
@@ -403,8 +409,9 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin_addr,
u32 get_stash_id(u32 stash_dest_hint, u32 vcpu);
void get_ome_index(u32 *omi_index, struct device *dev);
-int pamu_update_paace_stash(int liodn, u32 subwin, u32 value);
+int pamu_update_paace_field(int liodn, u32 subwin, int field, u32 value);
int pamu_disable_spaace(int liodn, u32 subwin);
u32 pamu_get_max_subwin_cnt(void);
+void enable_default_dma_window(int liodn);
#endif /* __FSL_PAMU_H */
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index c857c30..360fbc4 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -123,10 +123,10 @@ static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
spin_lock_irqsave(&iommu_lock, flags);
ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
sub_win_ptr[i].size,
- ~(u32)0,
+ sub_win_ptr[i].omi,
rpn,
dma_domain->snoop_id,
- dma_domain->stash_id,
+ sub_win_ptr[i].stash_id,
(i > 0) ? 1 : 0,
sub_win_ptr[i].prot);
spin_unlock_irqrestore(&iommu_lock, flags);
@@ -151,9 +151,9 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
spin_lock_irqsave(&iommu_lock, flags);
ret = pamu_config_ppaace(liodn, wnd_addr,
wnd->size,
- ~(u32)0,
+ wnd->omi,
wnd->paddr >> PAMU_PAGE_SHIFT,
- dma_domain->snoop_id, dma_domain->stash_id,
+ dma_domain->snoop_id, wnd->stash_id,
0, wnd->prot);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret)
@@ -184,10 +184,10 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
if (dma_domain->win_cnt > 1) {
ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
wnd->size,
- ~(u32)0,
+ wnd->omi,
wnd->paddr >> PAMU_PAGE_SHIFT,
dma_domain->snoop_id,
- dma_domain->stash_id,
+ wnd->stash_id,
(wnd_nr > 0) ? 1 : 0,
wnd->prot);
if (ret)
@@ -199,9 +199,9 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
ret = pamu_config_ppaace(liodn, wnd_addr,
wnd->size,
- ~(u32)0,
+ wnd->omi,
wnd->paddr >> PAMU_PAGE_SHIFT,
- dma_domain->snoop_id, dma_domain->stash_id,
+ dma_domain->snoop_id, wnd->stash_id,
0, wnd->prot);
if (ret)
pr_debug("Window reconfiguration failed for liodn %d\n", liodn);
@@ -212,29 +212,56 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
return ret;
}
-static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
- u32 val)
+struct pamu_attr_info {
+ u32 window;
+ int field;
+ u32 value;
+};
+
+static int update_liodn_attr(int liodn, struct fsl_dma_domain *dma_domain,
+ struct pamu_attr_info *attr_info)
{
int ret = 0, i;
- unsigned long flags;
- spin_lock_irqsave(&iommu_lock, flags);
- if (!dma_domain->win_arr) {
- pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn);
- spin_unlock_irqrestore(&iommu_lock, flags);
- return -EINVAL;
- }
+ spin_lock(&iommu_lock);
- for (i = 0; i < dma_domain->win_cnt; i++) {
- ret = pamu_update_paace_stash(liodn, i, val);
- if (ret) {
- pr_debug("Failed to update SPAACE %d field for liodn %d\n ", i, liodn);
- spin_unlock_irqrestore(&iommu_lock, flags);
- return ret;
+
+ if (~attr_info->window == 0) {
+ for (i = 0; i < dma_domain->win_cnt; i++) {
+ ret = pamu_update_paace_field(liodn, i,
+ attr_info->field,
+ attr_info->value);
+ if (ret)
+ break;
}
- }
+ } else
+ ret = pamu_update_paace_field(liodn, attr_info->window,
+ attr_info->field,
+ attr_info->value);
- spin_unlock_irqrestore(&iommu_lock, flags);
+ spin_unlock(&iommu_lock);
+
+ return ret;
+}
+
+/*
+ * Update attribute for all LIODNs associated with the domain
+ *
+ */
+static int update_domain_attr(struct fsl_dma_domain *dma_domain,
+ struct pamu_attr_info *attr_info)
+{
+ struct device_domain_info *info;
+ int ret = 0;
+
+ if (!list_empty(&dma_domain->devices)) {
+ list_for_each_entry(info, &dma_domain->devices, link) {
+ ret = update_liodn_attr(info->liodn, dma_domain,
+ attr_info);
+ if (ret)
+ break;
+ }
+ }
return ret;
}
@@ -266,7 +293,7 @@ static int pamu_set_liodn(int liodn, struct device *dev,
if (!ret)
ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
0, dma_domain->snoop_id,
- dma_domain->stash_id, win_cnt, 0);
+ ~(u32)0, win_cnt, 0);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt);
@@ -282,7 +309,7 @@ static int pamu_set_liodn(int liodn, struct device *dev,
ret = pamu_config_spaace(liodn, win_cnt, i,
subwin_size, omi_index,
0, dma_domain->snoop_id,
- dma_domain->stash_id,
+ ~(u32)0,
0, 0);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
@@ -301,7 +328,7 @@ static int check_size(u64 size, dma_addr_t iova)
* Size must be a power of two and at least be equal
* to PAMU page size.
*/
- if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) {
+ if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
pr_debug("%s: size too small or not a power of two\n", __func__);
return -EINVAL;
}
@@ -323,7 +350,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
if (!domain)
return NULL;
- domain->stash_id = ~(u32)0;
domain->snoop_id = ~(u32)0;
domain->win_cnt = pamu_get_max_subwin_cnt();
domain->geom_size = 0;
@@ -340,17 +366,57 @@ static inline struct device_domain_info *find_domain(struct device *dev)
return dev->archdata.iommu_domain;
}
+/* Disable device DMA capability and enable default DMA window */
+static void disable_device_dma(struct device_domain_info *info,
+ int enable_dma_window)
+{
+#ifdef CONFIG_PCI
+ if (info->dev->bus == &pci_bus_type) {
+ struct pci_dev *pdev = NULL;
+
+ pdev = to_pci_dev(info->dev);
+ pci_clear_master(pdev);
+ }
+#endif
+
+ if (enable_dma_window)
+ enable_default_dma_window(info->liodn);
+}
+
+static int check_for_shared_liodn(struct device_domain_info *info)
+{
+ struct device_domain_info *tmp;
+
+ /*
+ * Sanity check, to ensure that this is not a
+ * shared LIODN. In case of a PCIe controller
+ * it's possible that all PCIe devices share
+ * the same LIODN.
+ */
+ list_for_each_entry(tmp, &info->domain->devices, link) {
+ if (info->liodn == tmp->liodn)
+ return 1;
+ }
+
+ return 0;
+}
+
static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
unsigned long flags;
+ int enable_dma_window = 0;
list_del(&info->link);
spin_lock_irqsave(&iommu_lock, flags);
- if (win_cnt > 1)
- pamu_free_subwins(info->liodn);
- pamu_disable_liodn(info->liodn);
+ if (!check_for_shared_liodn(info)) {
+ if (win_cnt > 1)
+ pamu_free_subwins(info->liodn);
+ pamu_disable_liodn(info->liodn);
+ enable_dma_window = 1;
+ }
spin_unlock_irqrestore(&iommu_lock, flags);
spin_lock_irqsave(&device_domain_lock, flags);
+ disable_device_dma(info, enable_dma_window);
info->dev->archdata.iommu_domain = NULL;
kmem_cache_free(iommu_devinfo_cache, info);
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -475,21 +541,6 @@ static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
return ret;
}
-/* Update stash destination for all LIODNs associated with the domain */
-static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
-{
- struct device_domain_info *info;
- int ret = 0;
-
- list_for_each_entry(info, &dma_domain->devices, link) {
- ret = update_liodn_stash(info->liodn, dma_domain, val);
- if (ret)
- break;
- }
-
- return ret;
-}
-
/* Update domain mappings for all LIODNs associated with the domain */
static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
@@ -677,21 +728,15 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
return ret;
}
-static int fsl_pamu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+static struct device *get_dma_device(struct device *dev)
{
- struct fsl_dma_domain *dma_domain = domain->priv;
- const u32 *liodn;
- u32 liodn_cnt;
- int len, ret = 0;
- struct pci_dev *pdev = NULL;
- struct pci_controller *pci_ctl;
+ struct device *dma_dev = dev;
+#ifdef CONFIG_PCI
- /*
- * Use LIODN of the PCI controller while attaching a
- * PCI device.
- */
if (dev->bus == &pci_bus_type) {
+ struct pci_controller *pci_ctl;
+ struct pci_dev *pdev;
+
pdev = to_pci_dev(dev);
pci_ctl = pci_bus_to_host(pdev->bus);
/*
@@ -699,17 +744,31 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
* so we can get the LIODN programmed by
* u-boot.
*/
- dev = pci_ctl->parent;
+ dma_dev = pci_ctl->parent;
}
+#endif
+ return dma_dev;
+}
+
+static int fsl_pamu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct fsl_dma_domain *dma_domain = domain->priv;
+ struct device *dma_dev;
+ const u32 *liodn;
+ u32 liodn_cnt;
+ int len, ret = 0;
+
+ dma_dev = get_dma_device(dev);
- liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
+ liodn = of_get_property(dma_dev->of_node, "fsl,liodn", &len);
if (liodn) {
liodn_cnt = len / sizeof(u32);
ret = handle_attach_device(dma_domain, dev,
liodn, liodn_cnt);
} else {
pr_debug("missing fsl,liodn property at %s\n",
- dev->of_node->full_name);
+ dma_dev->of_node->full_name);
ret = -EINVAL;
}
@@ -720,32 +779,18 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct fsl_dma_domain *dma_domain = domain->priv;
+ struct device *dma_dev;
const u32 *prop;
int len;
- struct pci_dev *pdev = NULL;
- struct pci_controller *pci_ctl;
- /*
- * Use LIODN of the PCI controller while detaching a
- * PCI device.
- */
- if (dev->bus == &pci_bus_type) {
- pdev = to_pci_dev(dev);
- pci_ctl = pci_bus_to_host(pdev->bus);
- /*
- * make dev point to pci controller device
- * so we can get the LIODN programmed by
- * u-boot.
- */
- dev = pci_ctl->parent;
- }
+ dma_dev = get_dma_device(dev);
- prop = of_get_property(dev->of_node, "fsl,liodn", &len);
+ prop = of_get_property(dma_dev->of_node, "fsl,liodn", &len);
if (prop)
detach_device(dev, dma_domain);
else
pr_debug("missing fsl,liodn property at %s\n",
- dev->of_node->full_name);
+ dma_dev->of_node->full_name);
}
static int configure_domain_geometry(struct iommu_domain *domain, void *data)
@@ -783,27 +828,101 @@ static int configure_domain_geometry(struct iommu_domain *domain, void *data)
return 0;
}
+static inline int check_attr_window(u32 wnd, struct fsl_dma_domain *dma_domain)
+{
+ return (~wnd != 0) && (wnd >= dma_domain->win_cnt);
+}
+
+/* Set the domain operation mapping attribute */
+static int configure_domain_op_map(struct fsl_dma_domain *dma_domain,
+ void *data)
+{
+ struct dma_window *wnd;
+ unsigned long flags;
+ struct pamu_attr_info attr_info;
+ int ret, i;
+ struct pamu_omi_attribute *omi_attr = data;
+
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+
+ if (!dma_domain->win_arr) {
+ pr_err("Number of windows not configured\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -ENODEV;
+ }
+
+ if (omi_attr->omi >= OMI_MAX ||
+ check_attr_window(omi_attr->window, dma_domain)) {
+ pr_err("Invalid operation mapping index\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EINVAL;
+ }
+
+ if (~omi_attr->window == 0) {
+ wnd = &dma_domain->win_arr[0];
+ for (i = 0; i < dma_domain->win_cnt; i++)
+ wnd[i].omi = omi_attr->omi;
+ } else {
+ wnd = &dma_domain->win_arr[omi_attr->window];
+ wnd->omi = omi_attr->omi;
+ }
+
+ attr_info.window = omi_attr->window;
+ attr_info.field = PAACE_OMI_FIELD;
+ attr_info.value = omi_attr->omi;
+ ret = update_domain_attr(dma_domain, &attr_info);
+
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+ return ret;
+}
+
/* Set the domain stash attribute */
static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
{
struct pamu_stash_attribute *stash_attr = data;
+ struct dma_window *wnd;
unsigned long flags;
- int ret;
+ u32 stash_id;
+ int ret, i;
+ struct pamu_attr_info attr_info;
spin_lock_irqsave(&dma_domain->domain_lock, flags);
- memcpy(&dma_domain->dma_stash, stash_attr,
- sizeof(struct pamu_stash_attribute));
+ if (!dma_domain->win_arr) {
+ pr_err("Number of windows not configured\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -ENODEV;
+ }
- dma_domain->stash_id = get_stash_id(stash_attr->cache,
+ stash_id = get_stash_id(stash_attr->cache,
stash_attr->cpu);
- if (dma_domain->stash_id == ~(u32)0) {
- pr_debug("Invalid stash attributes\n");
+ if ((~stash_id == 0) ||
+ check_attr_window(stash_attr->window, dma_domain)) {
+ pr_err("Invalid stash attributes\n");
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return -EINVAL;
}
- ret = update_domain_stash(dma_domain, dma_domain->stash_id);
+ if (~stash_attr->window == 0) {
+ wnd = &dma_domain->win_arr[0];
+ for (i = 0; i < dma_domain->win_cnt; i++) {
+ wnd[i].stash_id = stash_id;
+ memcpy(&wnd[i].stash_attr, stash_attr,
+ sizeof(struct pamu_stash_attribute));
+ wnd[i].stash_attr.window = i;
+ }
+ } else {
+ wnd = &dma_domain->win_arr[stash_attr->window];
+ wnd->stash_id = stash_id;
+ memcpy(&wnd->stash_attr,
+ stash_attr, sizeof(struct pamu_stash_attribute));
+ }
+
+ attr_info.window = stash_attr->window;
+ attr_info.field = PAACE_STASH_FIELD;
+ attr_info.value = stash_id;
+ ret = update_domain_attr(dma_domain, &attr_info);
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
@@ -856,6 +975,9 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
case DOMAIN_ATTR_FSL_PAMU_ENABLE:
ret = configure_domain_dma_state(dma_domain, *(int *)data);
break;
+ case DOMAIN_ATTR_FSL_PAMU_OP_MAP:
+ ret = configure_domain_op_map(dma_domain, data);
+ break;
default:
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
@@ -873,16 +995,37 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
switch (attr_type) {
- case DOMAIN_ATTR_FSL_PAMU_STASH:
- memcpy((struct pamu_stash_attribute *) data, &dma_domain->dma_stash,
- sizeof(struct pamu_stash_attribute));
- break;
case DOMAIN_ATTR_FSL_PAMU_ENABLE:
*(int *)data = dma_domain->enabled;
break;
case DOMAIN_ATTR_FSL_PAMUV1:
*(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
break;
+ case DOMAIN_ATTR_FSL_PAMU_STASH: {
+ struct pamu_stash_attribute *stash_attr = data;
+ struct dma_window *wnd;
+
+ if (stash_attr->window >= dma_domain->win_cnt ||
+ ~stash_attr->window == 0)
+ return -EINVAL;
+
+ wnd = &dma_domain->win_arr[stash_attr->window];
+ memcpy(stash_attr, &wnd->stash_attr,
+ sizeof(struct pamu_stash_attribute));
+ break;
+ }
+ case DOMAIN_ATTR_FSL_PAMU_OP_MAP: {
+ struct pamu_omi_attribute *omi_attr = data;
+ struct dma_window *wnd;
+
+ if (omi_attr->window >= dma_domain->win_cnt ||
+ ~omi_attr->window == 0)
+ return -EINVAL;
+
+ wnd = &dma_domain->win_arr[omi_attr->window];
+ omi_attr->omi = wnd->omi;
+ break;
+ }
default:
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
@@ -905,6 +1048,7 @@ static struct iommu_group *get_device_iommu_group(struct device *dev)
return group;
}
+#ifdef CONFIG_PCI
static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
u32 version;
@@ -945,13 +1089,18 @@ static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
return NULL;
}
-static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
+static struct iommu_group *get_pci_device_group(struct device *dev)
{
struct pci_controller *pci_ctl;
bool pci_endpt_partioning;
struct iommu_group *group = NULL;
- struct pci_dev *bridge, *dma_pdev = NULL;
+ struct pci_dev *bridge, *pdev;
+ struct pci_dev *dma_pdev = NULL;
+ pdev = to_pci_dev(dev);
+ /* Don't create device groups for virtual PCI bridges */
+ if (pdev->subordinate)
+ return NULL;
pci_ctl = pci_bus_to_host(pdev->bus);
pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
/* We can partition PCIe devices so assign device group to the device */
@@ -1044,11 +1193,11 @@ root_bus:
return group;
}
+#endif
static int fsl_pamu_add_device(struct device *dev)
{
struct iommu_group *group = NULL;
- struct pci_dev *pdev;
const u32 *prop;
int ret, len;
@@ -1056,19 +1205,15 @@ static int fsl_pamu_add_device(struct device *dev)
* For platform devices we allocate a separate group for
* each of the devices.
*/
- if (dev->bus == &pci_bus_type) {
- pdev = to_pci_dev(dev);
- /* Don't create device groups for virtual PCI bridges */
- if (pdev->subordinate)
- return 0;
-
- group = get_pci_device_group(pdev);
-
- } else {
+ if (dev->bus == &platform_bus_type) {
prop = of_get_property(dev->of_node, "fsl,liodn", &len);
if (prop)
group = get_device_iommu_group(dev);
}
+#ifdef CONFIG_PCI
+ else
+ group = get_pci_device_group(dev);
+#endif
if (!group || IS_ERR(group))
return PTR_ERR(group);
@@ -1084,6 +1229,16 @@ static void fsl_pamu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
}
+static void dma_domain_init_windows(struct fsl_dma_domain *dma_domain)
+{
+ int i;
+
+ for (i = 0; i < dma_domain->win_cnt; i++) {
+ dma_domain->win_arr[i].stash_id = ~(u32)0;
+ dma_domain->win_arr[i].omi = ~(u32)0;
+ }
+}
+
static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
struct fsl_dma_domain *dma_domain = domain->priv;
@@ -1127,6 +1282,7 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
return -ENOMEM;
}
dma_domain->win_cnt = w_count;
+ dma_domain_init_windows(dma_domain);
}
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
@@ -1140,6 +1296,17 @@ static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
return dma_domain->win_cnt;
}
+static struct iommu_domain *fsl_get_dev_domain(struct device *dev)
+{
+ struct device_domain_info *info;
+
+ info = dev->archdata.iommu_domain;
+ if (info && info->domain)
+ return info->domain->iommu_domain;
+
+ return NULL;
+}
+
static struct iommu_ops fsl_pamu_ops = {
.domain_init = fsl_pamu_domain_init,
.domain_destroy = fsl_pamu_domain_destroy,
@@ -1155,6 +1322,7 @@ static struct iommu_ops fsl_pamu_ops = {
.domain_get_attr = fsl_pamu_get_domain_attr,
.add_device = fsl_pamu_add_device,
.remove_device = fsl_pamu_remove_device,
+ .get_dev_iommu_domain = fsl_get_dev_domain,
};
int pamu_domain_init()
@@ -1166,7 +1334,9 @@ int pamu_domain_init()
return ret;
bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
+#ifdef CONFIG_PCI
bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
+#endif
return ret;
}
diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h
index c90293f..60a8452 100644
--- a/drivers/iommu/fsl_pamu_domain.h
+++ b/drivers/iommu/fsl_pamu_domain.h
@@ -22,10 +22,13 @@
#include "fsl_pamu.h"
struct dma_window {
- phys_addr_t paddr;
- u64 size;
- int valid;
- int prot;
+ phys_addr_t paddr;
+ u64 size;
+ int valid;
+ int prot;
+ struct pamu_stash_attribute stash_attr;
+ u32 stash_id;
+ u32 omi;
};
struct fsl_dma_domain {
@@ -67,9 +70,6 @@ struct fsl_dma_domain {
*/
int mapped;
int enabled;
- /* stash_id obtained from the stash attribute details */
- u32 stash_id;
- struct pamu_stash_attribute dma_stash;
u32 snoop_id;
struct iommu_domain *iommu_domain;
spinlock_t domain_lock;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index fbe9ca7..6ac5f50 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -696,6 +696,16 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
+struct iommu_domain *iommu_get_dev_domain(struct device *dev)
+{
+ struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (unlikely(ops == NULL || ops->get_dev_iommu_domain == NULL))
+ return NULL;
+
+ return ops->get_dev_iommu_domain(dev);
+}
+EXPORT_SYMBOL_GPL(iommu_get_dev_domain);
/*
* IOMMU groups are really the natrual working unit of the IOMMU, but
* the IOMMU API works on domains and devices. Bridge that gap by