From 2088ecba93f68252464b95375d80a12fe723ba5e Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:19 -0500 Subject: iommu/omap: Remove refcount field from omap_iommu object The refcount field in omap_iommu object is primarily used to check if an IOMMU device has already been enabled, but this is already implicit in the omap_iommu_attach_dev() which ensures that only a single device can attach to an IOMMU. This field is redundant, and so has been cleaned up. Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 3627887..ea04e4d 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -819,8 +819,9 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) u32 *iopgd, *iopte; struct omap_iommu *obj = data; struct iommu_domain *domain = obj->domain; + struct omap_iommu_domain *omap_domain = domain->priv; - if (!obj->refcount) + if (!omap_domain->iommu_dev) return IRQ_NONE; errs = iommu_report_fault(obj, &da); @@ -880,13 +881,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) spin_lock(&obj->iommu_lock); - /* an iommu device can only be attached once */ - if (++obj->refcount > 1) { - dev_err(dev, "%s: already attached!\n", obj->name); - err = -EBUSY; - goto err_enable; - } - obj->iopgd = iopgd; err = iommu_enable(obj); if (err) @@ -899,7 +893,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) return obj; err_enable: - obj->refcount--; spin_unlock(&obj->iommu_lock); return ERR_PTR(err); } @@ -915,9 +908,7 @@ static void omap_iommu_detach(struct omap_iommu *obj) spin_lock(&obj->iommu_lock); - if (--obj->refcount == 0) - iommu_disable(obj); - + iommu_disable(obj); obj->iopgd = NULL; spin_unlock(&obj->iommu_lock); diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 4f1b68c..5c14000 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -33,7 +33,6 @@ struct omap_iommu { void *isr_priv; struct iommu_domain *domain; - unsigned int refcount; spinlock_t iommu_lock; /* global for this whole object */ /* -- cgit v0.10.2 From 4cfcf2ab01617401236b84c3539a59099592c7f3 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:20 -0500 Subject: iommu/omap: Remove unused isr_priv field from omap_iommu The isr_priv field is a left-over from before the IOMMU API adaptation, this was used to store the callback data. This is no longer relevant, so remove it. Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 5c14000..18a0f3a 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -30,7 +30,6 @@ struct omap_iommu { const char *name; void __iomem *regbase; struct device *dev; - void *isr_priv; struct iommu_domain *domain; spinlock_t iommu_lock; /* global for this whole object */ -- cgit v0.10.2 From 52dad776fcb644f9ccfc1986c965bd9438e59b21 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:21 -0500 Subject: iommu/omap: Remove duplicate declarations The omap_iommu_save_ctx() and omap_iommu_restore_ctx() declarations are defined in include/linux/omap-iommu.h and do not belong in the internal drivers/iommu/omap-iommu.h header, so remove them. 
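(An aside on why duplicate extern declarations are worth chasing: the compiler only cross-checks prototypes that are visible together in one translation unit, so two copies of a declaration can drift apart without any diagnostic. A minimal sketch of the failure mode, with a hypothetical drift, not code from this series:)

    /* include/linux/omap-iommu.h -- the public copy, which stays */
    extern void omap_iommu_save_ctx(struct device *dev);

    /* drivers/iommu/omap-iommu.h -- the duplicate removed here. Had a
     * later change altered only this copy, say to:
     *
     *     extern void omap_iommu_save_ctx(struct omap_iommu *obj);
     *
     * every file including just one of the two headers would still build
     * cleanly; only a file including both would get a compiler error, and
     * callers seeing the stale prototype would misbehave at run time. */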
Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 18a0f3a..4fc51c8 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -197,9 +197,6 @@ extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e); extern int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e); -extern void omap_iommu_save_ctx(struct device *dev); -extern void omap_iommu_restore_ctx(struct device *dev); - extern int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *)); -- cgit v0.10.2 From 533b40cccd2fbe8cf6ba361cab7e99f626be45bf Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:22 -0500 Subject: iommu/omap: Remove conditional definition of dev_to_omap_iommu() dev_to_omap_iommu() is local to the OMAP IOMMU modules and need not be defined conditionally. The CONFIG_IOMMU_API dependency check was added in the past to fix a compilation issue back when the header resided in the arch/arm layers, and is no longer needed. While at it, guard the header against double inclusion as well. Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 4fc51c8..d7c5132 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -10,6 +10,9 @@ * published by the Free Software Foundation. */ +#ifndef _OMAP_IOMMU_H +#define _OMAP_IOMMU_H + #if defined(CONFIG_ARCH_OMAP1) #error "iommu for this processor not implemented yet" #endif @@ -92,7 +95,6 @@ struct iommu_functions { ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len); }; -#ifdef CONFIG_IOMMU_API /** * dev_to_omap_iommu() - retrieves an omap iommu object from a user device * @dev: iommu client device @@ -103,7 +105,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) { return arch_data->iommu_dev; } -#endif /* * MMU Register offsets */ @@ -220,3 +221,5 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs) { __raw_writel(val, obj->regbase + offs); } + +#endif /* _OMAP_IOMMU_H */ -- cgit v0.10.2 From 2b313dd13a1690b9c4e2a968feea890838b77b81 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:23 -0500 Subject: iommu/omap: Remove ver debugfs entry The debugfs entry 'ver' to read the OMAP IOMMU version is not very useful to developers, so it has been removed. The same information can be deduced from the REVISION register in the register dump provided by the debugfs entry 'regs'. This also allows us to remove omap_iommu_arch_version(), which currently returns a fixed value.
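(The decoding the removed entry performed is easy to repeat by hand on the REVISION value from the 'regs' dump; the bit layout below is the same one the deleted debug_read_ver() and omap2_iommu_enable() use, with an arbitrary example value:)

    /* MMU_REVISION: bits [7:4] = major version, bits [3:0] = minor */
    u32 rev = iommu_read_reg(obj, MMU_REVISION);   /* say, 0x21 */
    dev_info(obj->dev, "H/W version: %d.%d\n",
             (rev >> 4) & 0xf, rev & 0xf);         /* "2.1" for 0x21 */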
Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 531658d..0fb92aa 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -30,17 +30,6 @@ static DEFINE_MUTEX(iommu_debug_lock); static struct dentry *iommu_debug_root; -static ssize_t debug_read_ver(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - u32 ver = omap_iommu_arch_version(); - char buf[MAXCOLUMN], *p = buf; - - p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf); - - return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); -} - static ssize_t debug_read_regs(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { @@ -228,7 +217,6 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, .llseek = generic_file_llseek, \ }; -DEBUG_FOPS_RO(ver); DEBUG_FOPS_RO(regs); DEBUG_FOPS_RO(tlb); DEBUG_FOPS(pagetable); @@ -273,7 +261,6 @@ static int iommu_debug_register(struct device *dev, void *data) if (!d) goto nomem; - DEBUG_ADD_FILE_RO(ver); DEBUG_ADD_FILE_RO(regs); DEBUG_ADD_FILE_RO(tlb); DEBUG_ADD_FILE(pagetable); -- cgit v0.10.2 From 3acb04ca5ea4e03c5718a87b713b2fd6f7f52b51 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:24 -0500 Subject: iommu/omap: Remove omap_iommu_arch_version() and version field The function omap_iommu_arch_version() is not used anymore, and is not required either, so remove it. The .version field in struct iommu_functions that this function uses is also removed, as it is not really an ops to retrieve a version and there won't be any usage for this field either. Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index ea04e4d..f9efa6b 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -138,15 +138,6 @@ void omap_iommu_restore_ctx(struct device *dev) } EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); -/** - * omap_iommu_arch_version - Return running iommu arch version - **/ -u32 omap_iommu_arch_version(void) -{ - return arch_iommu->version; -} -EXPORT_SYMBOL_GPL(omap_iommu_arch_version); - static int iommu_enable(struct omap_iommu *obj) { int err; diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index d7c5132..45fe67d 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -70,8 +70,6 @@ struct cr_regs { /* architecture specific functions */ struct iommu_functions { - unsigned long version; - int (*enable)(struct omap_iommu *obj); void (*disable)(struct omap_iommu *obj); void (*set_twl)(struct omap_iommu *obj, bool on); @@ -191,8 +189,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) /* * global functions */ -extern u32 omap_iommu_arch_version(void); - extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e); extern int diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c index 5e1ea3b..2f6a9f7 100644 --- a/drivers/iommu/omap-iommu2.c +++ b/drivers/iommu/omap-iommu2.c @@ -297,8 +297,6 @@ static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) } static const struct iommu_functions omap2_iommu_ops = { - .version = IOMMU_ARCH_VERSION, - .enable = omap2_iommu_enable, .disable = omap2_iommu_disable, .set_twl = omap2_iommu_set_twl, -- cgit v0.10.2 From c2372aafbc4ff59bb6a6bda7ed73364d7f7c53bb Mon Sep 17 00:00:00 2001 From: Suman Anna 
Date: Wed, 22 Oct 2014 17:22:25 -0500 Subject: iommu/omap: Remove bogus version check in context save/restore The omap2_iommu_save_ctx() and omap2_iommu_restore_ctx() functions perform a sanity version check against a fixed value that is correct only for OMAP2/OMAP3 IOMMUs. This fixed check does not scale to all OMAP2+ IOMMUs and is not absolutely required, so it has been removed. Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c index 2f6a9f7..372141b 100644 --- a/drivers/iommu/omap-iommu2.c +++ b/drivers/iommu/omap-iommu2.c @@ -26,8 +26,6 @@ /* * omap2 architecture specific register bit definitions */ -#define IOMMU_ARCH_VERSION 0x00000011 - /* IRQSTATUS & IRQENABLE */ #define MMU_IRQ_MULTIHITFAULT (1 << 4) #define MMU_IRQ_TABLEWALKFAULT (1 << 3) @@ -268,8 +266,6 @@ static void omap2_iommu_save_ctx(struct omap_iommu *obj) p[i] = iommu_read_reg(obj, i * sizeof(u32)); dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); } - - BUG_ON(p[0] != IOMMU_ARCH_VERSION); } static void omap2_iommu_restore_ctx(struct omap_iommu *obj) { @@ -281,8 +277,6 @@ iommu_write_reg(obj, p[i], i * sizeof(u32)); dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); } - - BUG_ON(p[0] != IOMMU_ARCH_VERSION); } static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) -- cgit v0.10.2 From 124262a2eecc4c3e6e96e083e50ce4909b2d69d9 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:26 -0500 Subject: iommu/omap: Simplify omap2_iommu_fault_isr() The function omap2_iommu_fault_isr() does an unnecessary recomputation of the return value. The logic relies on setting the same bit fields as the MMU fault error status bits, so simplify this function and remove the unneeded macros. These macros were originally exported to notify users of MMU faults prior to the IOMMU framework adaptation, but are now redundant. Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c index 372141b..ce2fff3 100644 --- a/drivers/iommu/omap-iommu2.c +++ b/drivers/iommu/omap-iommu2.c @@ -53,13 +53,6 @@ ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \ ((pgsz) == MMU_CAM_PGSZ_4K) ?
0xfffff000 : 0) -/* IOMMU errors */ -#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0) -#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1) -#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2) -#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3) -#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4) - static void __iommu_set_twl(struct omap_iommu *obj, bool on) { u32 l = iommu_read_reg(obj, MMU_CNTL); @@ -122,7 +115,6 @@ static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on) static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra) { u32 stat, da; - u32 errs = 0; stat = iommu_read_reg(obj, MMU_IRQSTATUS); stat &= MMU_IRQ_MASK; @@ -134,19 +126,9 @@ static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra) da = iommu_read_reg(obj, MMU_FAULT_AD); *ra = da; - if (stat & MMU_IRQ_TLBMISS) - errs |= OMAP_IOMMU_ERR_TLB_MISS; - if (stat & MMU_IRQ_TRANSLATIONFAULT) - errs |= OMAP_IOMMU_ERR_TRANS_FAULT; - if (stat & MMU_IRQ_EMUMISS) - errs |= OMAP_IOMMU_ERR_EMU_MISS; - if (stat & MMU_IRQ_TABLEWALKFAULT) - errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT; - if (stat & MMU_IRQ_MULTIHITFAULT) - errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT; iommu_write_reg(obj, stat, MMU_IRQSTATUS); - return errs; + return stat; } static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) -- cgit v0.10.2 From bd4396f09a4a9e77423e92ec9448217ab46f6edf Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:27 -0500 Subject: iommu/omap: Consolidate OMAP IOMMU modules The OMAP IOMMU driver was originally designed as modules, and split into a core module and a thin arch-specific module through the OMAP arch-specific struct iommu_functions, to scale for both OMAP1 and OMAP2+ IOMMU variants. The driver can only be built for OMAP2+ platforms currently, and also can only be built-in after the adaptation to generic IOMMU API. The OMAP1 variant was never added and will most probably be never added (the code for the only potential user, its parent, DSP processor has already been cleaned up). So, consolidate the OMAP2 specific omap-iommu2 module into the core OMAP IOMMU driver - this eliminates the arch-specific ops structure and simplifies the driver into a single module that only implements the generic IOMMU API's iommu_ops. 
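(For orientation before the change list: after the consolidation, the only ops table left in the driver is the generic one. A sketch of its shape, using the v3.18-era struct iommu_ops fields and the handler names that appear elsewhere in this series; abbreviated for illustration, not the literal patch hunk:)

    static struct iommu_ops omap_iommu_ops = {
        .domain_init    = omap_iommu_domain_init,
        .domain_destroy = omap_iommu_domain_destroy,
        .attach_dev     = omap_iommu_attach_dev,
        .detach_dev     = omap_iommu_detach_dev,
        .map            = omap_iommu_map,
        .unmap          = omap_iommu_unmap,
        .iova_to_phys   = omap_iommu_iova_to_phys,
        .pgsize_bitmap  = OMAP_IOMMU_PGSIZES,
    };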
The following are the main changes: - omap-iommu2 module is completely eliminated, with the common definitions moved to the internal omap-iommu.h, and the ops implementations moved into omap-iommu.c - OMAP arch-specific struct iommu_functions is also eliminated, with the ops implementations directly absorbed into the calling functions - iotlb_alloc_cr() is no longer inlined and defined only when PREFETCH_IOTLB is defined - iotlb_dump_cr() is similarly defined only when CONFIG_OMAP_IOMMU_DEBUG is defined - Elimination of the OMAP IOMMU exported functions to register the arch ops, omap_install_iommu_arch() & omap_uninstall_iommu_arch() - Any stale comments about OMAP1 are also cleaned up Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 16edef7..18fa446 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -11,7 +11,6 @@ obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o -obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index f9efa6b..91262fa 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -76,53 +76,23 @@ struct iotlb_lock { short vict; }; -/* accommodate the difference between omap1 and omap2/3 */ -static const struct iommu_functions *arch_iommu; - static struct platform_driver omap_iommu_driver; static struct kmem_cache *iopte_cachep; /** - * omap_install_iommu_arch - Install archtecure specific iommu functions - * @ops: a pointer to architecture specific iommu functions - * - * There are several kind of iommu algorithm(tlb, pagetable) among - * omap series. This interface installs such an iommu algorighm. - **/ -int omap_install_iommu_arch(const struct iommu_functions *ops) -{ - if (arch_iommu) - return -EBUSY; - - arch_iommu = ops; - return 0; -} -EXPORT_SYMBOL_GPL(omap_install_iommu_arch); - -/** - * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions - * @ops: a pointer to architecture specific iommu functions - * - * This interface uninstalls the iommu algorighm installed previously. 
- **/ -void omap_uninstall_iommu_arch(const struct iommu_functions *ops) -{ - if (arch_iommu != ops) - pr_err("%s: not your arch\n", __func__); - - arch_iommu = NULL; -} -EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch); - -/** * omap_iommu_save_ctx - Save registers for pm off-mode support * @dev: client device **/ void omap_iommu_save_ctx(struct device *dev) { struct omap_iommu *obj = dev_to_omap_iommu(dev); + u32 *p = obj->ctx; + int i; - arch_iommu->save_ctx(obj); + for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { + p[i] = iommu_read_reg(obj, i * sizeof(u32)); + dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); + } } EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); @@ -133,20 +103,75 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); void omap_iommu_restore_ctx(struct device *dev) { struct omap_iommu *obj = dev_to_omap_iommu(dev); + u32 *p = obj->ctx; + int i; - arch_iommu->restore_ctx(obj); + for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { + iommu_write_reg(obj, p[i], i * sizeof(u32)); + dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); + } } EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); +static void __iommu_set_twl(struct omap_iommu *obj, bool on) +{ + u32 l = iommu_read_reg(obj, MMU_CNTL); + + if (on) + iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE); + else + iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE); + + l &= ~MMU_CNTL_MASK; + if (on) + l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN); + else + l |= (MMU_CNTL_MMU_EN); + + iommu_write_reg(obj, l, MMU_CNTL); +} + +static int omap2_iommu_enable(struct omap_iommu *obj) +{ + u32 l, pa; + + if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) + return -EINVAL; + + pa = virt_to_phys(obj->iopgd); + if (!IS_ALIGNED(pa, SZ_16K)) + return -EINVAL; + + l = iommu_read_reg(obj, MMU_REVISION); + dev_info(obj->dev, "%s: version %d.%d\n", obj->name, + (l >> 4) & 0xf, l & 0xf); + + iommu_write_reg(obj, pa, MMU_TTB); + + if (obj->has_bus_err_back) + iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG); + + __iommu_set_twl(obj, true); + + return 0; +} + +static void omap2_iommu_disable(struct omap_iommu *obj) +{ + u32 l = iommu_read_reg(obj, MMU_CNTL); + + l &= ~MMU_CNTL_MASK; + iommu_write_reg(obj, l, MMU_CNTL); + + dev_dbg(obj->dev, "%s is shutting down\n", obj->name); +} + static int iommu_enable(struct omap_iommu *obj) { int err; struct platform_device *pdev = to_platform_device(obj->dev); struct iommu_platform_data *pdata = pdev->dev.platform_data; - if (!arch_iommu) - return -ENODEV; - if (pdata && pdata->deassert_reset) { err = pdata->deassert_reset(pdev, pdata->reset_name); if (err) { @@ -157,7 +182,7 @@ static int iommu_enable(struct omap_iommu *obj) pm_runtime_get_sync(obj->dev); - err = arch_iommu->enable(obj); + err = omap2_iommu_enable(obj); return err; } @@ -167,7 +192,7 @@ static void iommu_disable(struct omap_iommu *obj) struct platform_device *pdev = to_platform_device(obj->dev); struct iommu_platform_data *pdata = pdev->dev.platform_data; - arch_iommu->disable(obj); + omap2_iommu_disable(obj); pm_runtime_put_sync(obj->dev); @@ -182,7 +207,13 @@ void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) { BUG_ON(!cr || !e); - arch_iommu->cr_to_e(cr, e); + e->da = cr->cam & MMU_CAM_VATAG_MASK; + e->pa = cr->ram & MMU_RAM_PADDR_MASK; + e->valid = cr->cam & MMU_CAM_V; + e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK; + e->endian = cr->ram & MMU_RAM_ENDIAN_MASK; + e->elsz = cr->ram & MMU_RAM_ELSZ_MASK; + e->mixed = cr->ram & MMU_RAM_MIXED; } EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e); @@ -191,31 +222,46 @@ static 
inline int iotlb_cr_valid(struct cr_regs *cr) if (!cr) return -EINVAL; - return arch_iommu->cr_valid(cr); -} - -static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj, - struct iotlb_entry *e) -{ - if (!e) - return NULL; - - return arch_iommu->alloc_cr(obj, e); + return cr->cam & MMU_CAM_V; } static u32 iotlb_cr_to_virt(struct cr_regs *cr) { - return arch_iommu->cr_to_virt(cr); + u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK; + u32 mask = get_cam_va_mask(cr->cam & page_size); + + return cr->cam & mask; } static u32 get_iopte_attr(struct iotlb_entry *e) { - return arch_iommu->get_pte_attr(e); + u32 attr; + + attr = e->mixed << 5; + attr |= e->endian; + attr |= e->elsz >> 3; + attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) || + (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6); + return attr; } static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da) { - return arch_iommu->fault_isr(obj, da); + u32 status, fault_addr; + + status = iommu_read_reg(obj, MMU_IRQSTATUS); + status &= MMU_IRQ_MASK; + if (!status) { + *da = 0; + return 0; + } + + fault_addr = iommu_read_reg(obj, MMU_FAULT_AD); + *da = fault_addr; + + iommu_write_reg(obj, status, MMU_IRQSTATUS); + + return status; } static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l) @@ -241,31 +287,19 @@ static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l) static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) { - arch_iommu->tlb_read_cr(obj, cr); + cr->cam = iommu_read_reg(obj, MMU_READ_CAM); + cr->ram = iommu_read_reg(obj, MMU_READ_RAM); } static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) { - arch_iommu->tlb_load_cr(obj, cr); + iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM); + iommu_write_reg(obj, cr->ram, MMU_RAM); iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); iommu_write_reg(obj, 1, MMU_LD_TLB); } -/** - * iotlb_dump_cr - Dump an iommu tlb entry into buf - * @obj: target iommu - * @cr: contents of cam and ram register - * @buf: output buffer - **/ -static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, - char *buf) -{ - BUG_ON(!cr || !buf); - - return arch_iommu->dump_cr(obj, cr, buf); -} - /* only used in iotlb iteration for-loop */ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) { @@ -280,12 +314,36 @@ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) return cr; } +#ifdef PREFETCH_IOTLB +static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj, + struct iotlb_entry *e) +{ + struct cr_regs *cr; + + if (!e) + return NULL; + + if (e->da & ~(get_cam_va_mask(e->pgsz))) { + dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__, + e->da); + return ERR_PTR(-EINVAL); + } + + cr = kmalloc(sizeof(*cr), GFP_KERNEL); + if (!cr) + return ERR_PTR(-ENOMEM); + + cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid; + cr->ram = e->pa | e->endian | e->elsz | e->mixed; + + return cr; +} + /** * load_iotlb_entry - Set an iommu tlb entry * @obj: target iommu * @e: an iommu tlb entry info **/ -#ifdef PREFETCH_IOTLB static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) { int err = 0; @@ -416,6 +474,44 @@ static void flush_iotlb_all(struct omap_iommu *obj) #if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) +#define pr_reg(name) \ + do { \ + ssize_t bytes; \ + const char *str = "%20s: %08x\n"; \ + const int maxcol = 32; \ + bytes = snprintf(p, maxcol, str, __stringify(name), \ + iommu_read_reg(obj, MMU_##name)); \ + p += bytes; \ + len -= bytes; \ + if 
(len < maxcol) \ + goto out; \ + } while (0) + +static ssize_t +omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len) +{ + char *p = buf; + + pr_reg(REVISION); + pr_reg(IRQSTATUS); + pr_reg(IRQENABLE); + pr_reg(WALKING_ST); + pr_reg(CNTL); + pr_reg(FAULT_AD); + pr_reg(TTB); + pr_reg(LOCK); + pr_reg(LD_TLB); + pr_reg(CAM); + pr_reg(RAM); + pr_reg(GFLUSH); + pr_reg(FLUSH_ENTRY); + pr_reg(READ_CAM); + pr_reg(READ_RAM); + pr_reg(EMU_FAULT_AD); +out: + return p - buf; +} + ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) { if (!obj || !buf) @@ -423,7 +519,7 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) pm_runtime_get_sync(obj->dev); - bytes = arch_iommu->dump_ctx(obj, buf, bytes); + bytes = omap2_iommu_dump_ctx(obj, buf, bytes); pm_runtime_put_sync(obj->dev); @@ -455,6 +551,24 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) } /** + * iotlb_dump_cr - Dump an iommu tlb entry into buf + * @obj: target iommu + * @cr: contents of cam and ram register + * @buf: output buffer + **/ +static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, + char *buf) +{ + char *p = buf; + + /* FIXME: Need more detail analysis of cam/ram */ + p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram, + (cr->cam & MMU_CAM_P) ? 1 : 0); + + return p - buf; +} + +/** * omap_dump_tlb_entries - dump cr arrays to given buffer * @obj: target iommu * @buf: output buffer @@ -1008,7 +1122,6 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) e->da = da; e->pa = pa; e->valid = MMU_CAM_V; - /* FIXME: add OMAP1 support */ e->pgsz = pgsz; e->endian = MMU_RAM_ENDIAN_LITTLE; e->elsz = MMU_RAM_ELSZ_8; diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 45fe67d..0516e0e 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -13,10 +13,6 @@ #ifndef _OMAP_IOMMU_H #define _OMAP_IOMMU_H -#if defined(CONFIG_ARCH_OMAP1) -#error "iommu for this processor not implemented yet" -#endif - struct iotlb_entry { u32 da; u32 pa; @@ -68,31 +64,6 @@ struct cr_regs { }; }; -/* architecture specific functions */ -struct iommu_functions { - int (*enable)(struct omap_iommu *obj); - void (*disable)(struct omap_iommu *obj); - void (*set_twl)(struct omap_iommu *obj, bool on); - u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra); - - void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr); - void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr); - - struct cr_regs *(*alloc_cr)(struct omap_iommu *obj, - struct iotlb_entry *e); - int (*cr_valid)(struct cr_regs *cr); - u32 (*cr_to_virt)(struct cr_regs *cr); - void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e); - ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr, - char *buf); - - u32 (*get_pte_attr)(struct iotlb_entry *e); - - void (*save_ctx)(struct omap_iommu *obj); - void (*restore_ctx)(struct omap_iommu *obj); - ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len); -}; - /** * dev_to_omap_iommu() - retrieves an omap iommu object from a user device * @dev: iommu client device @@ -130,6 +101,28 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) /* * MMU Register bit definitions */ +/* IRQSTATUS & IRQENABLE */ +#define MMU_IRQ_MULTIHITFAULT (1 << 4) +#define MMU_IRQ_TABLEWALKFAULT (1 << 3) +#define MMU_IRQ_EMUMISS (1 << 2) +#define MMU_IRQ_TRANSLATIONFAULT (1 << 1) +#define MMU_IRQ_TLBMISS (1 << 0) + +#define __MMU_IRQ_FAULT \ + (MMU_IRQ_MULTIHITFAULT | 
MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT) +#define MMU_IRQ_MASK \ + (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS) +#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT) +#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS) + +/* MMU_CNTL */ +#define MMU_CNTL_SHIFT 1 +#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT) +#define MMU_CNTL_EML_TLB (1 << 3) +#define MMU_CNTL_TWL_EN (1 << 2) +#define MMU_CNTL_MMU_EN (1 << 1) + +/* CAM */ #define MMU_CAM_VATAG_SHIFT 12 #define MMU_CAM_VATAG_MASK \ ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT) @@ -141,6 +134,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) #define MMU_CAM_PGSZ_4K (2 << 0) #define MMU_CAM_PGSZ_16M (3 << 0) +/* RAM */ #define MMU_RAM_PADDR_SHIFT 12 #define MMU_RAM_PADDR_MASK \ ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT) @@ -162,6 +156,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) #define MMU_GP_REG_BUS_ERR_BACK_EN 0x1 +#define get_cam_va_mask(pgsz) \ + (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \ + ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \ + ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \ + ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0) + /* * utilities for super page(16MB, 1MB, 64KB and 4KB) */ @@ -197,9 +197,6 @@ omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e); extern int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *)); -extern int omap_install_iommu_arch(const struct iommu_functions *ops); -extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops); - extern ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len); extern size_t diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c deleted file mode 100644 index ce2fff3..0000000 --- a/drivers/iommu/omap-iommu2.c +++ /dev/null @@ -1,311 +0,0 @@ -/* - * omap iommu: omap2/3 architecture specific functions - * - * Copyright (C) 2008-2009 Nokia Corporation - * - * Written by Hiroshi DOYU , - * Paul Mundt and Toshihiro Kobayashi - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "omap-iommu.h" - -/* - * omap2 architecture specific register bit definitions - */ -/* IRQSTATUS & IRQENABLE */ -#define MMU_IRQ_MULTIHITFAULT (1 << 4) -#define MMU_IRQ_TABLEWALKFAULT (1 << 3) -#define MMU_IRQ_EMUMISS (1 << 2) -#define MMU_IRQ_TRANSLATIONFAULT (1 << 1) -#define MMU_IRQ_TLBMISS (1 << 0) - -#define __MMU_IRQ_FAULT \ - (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT) -#define MMU_IRQ_MASK \ - (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS) -#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT) -#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS) - -/* MMU_CNTL */ -#define MMU_CNTL_SHIFT 1 -#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT) -#define MMU_CNTL_EML_TLB (1 << 3) -#define MMU_CNTL_TWL_EN (1 << 2) -#define MMU_CNTL_MMU_EN (1 << 1) - -#define get_cam_va_mask(pgsz) \ - (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \ - ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \ - ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \ - ((pgsz) == MMU_CAM_PGSZ_4K) ? 
0xfffff000 : 0) - -static void __iommu_set_twl(struct omap_iommu *obj, bool on) -{ - u32 l = iommu_read_reg(obj, MMU_CNTL); - - if (on) - iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE); - else - iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE); - - l &= ~MMU_CNTL_MASK; - if (on) - l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN); - else - l |= (MMU_CNTL_MMU_EN); - - iommu_write_reg(obj, l, MMU_CNTL); -} - - -static int omap2_iommu_enable(struct omap_iommu *obj) -{ - u32 l, pa; - - if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) - return -EINVAL; - - pa = virt_to_phys(obj->iopgd); - if (!IS_ALIGNED(pa, SZ_16K)) - return -EINVAL; - - l = iommu_read_reg(obj, MMU_REVISION); - dev_info(obj->dev, "%s: version %d.%d\n", obj->name, - (l >> 4) & 0xf, l & 0xf); - - iommu_write_reg(obj, pa, MMU_TTB); - - if (obj->has_bus_err_back) - iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG); - - __iommu_set_twl(obj, true); - - return 0; -} - -static void omap2_iommu_disable(struct omap_iommu *obj) -{ - u32 l = iommu_read_reg(obj, MMU_CNTL); - - l &= ~MMU_CNTL_MASK; - iommu_write_reg(obj, l, MMU_CNTL); - - dev_dbg(obj->dev, "%s is shutting down\n", obj->name); -} - -static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on) -{ - __iommu_set_twl(obj, false); -} - -static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra) -{ - u32 stat, da; - - stat = iommu_read_reg(obj, MMU_IRQSTATUS); - stat &= MMU_IRQ_MASK; - if (!stat) { - *ra = 0; - return 0; - } - - da = iommu_read_reg(obj, MMU_FAULT_AD); - *ra = da; - - iommu_write_reg(obj, stat, MMU_IRQSTATUS); - - return stat; -} - -static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) -{ - cr->cam = iommu_read_reg(obj, MMU_READ_CAM); - cr->ram = iommu_read_reg(obj, MMU_READ_RAM); -} - -static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) -{ - iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM); - iommu_write_reg(obj, cr->ram, MMU_RAM); -} - -static u32 omap2_cr_to_virt(struct cr_regs *cr) -{ - u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK; - u32 mask = get_cam_va_mask(cr->cam & page_size); - - return cr->cam & mask; -} - -static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj, - struct iotlb_entry *e) -{ - struct cr_regs *cr; - - if (e->da & ~(get_cam_va_mask(e->pgsz))) { - dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__, - e->da); - return ERR_PTR(-EINVAL); - } - - cr = kmalloc(sizeof(*cr), GFP_KERNEL); - if (!cr) - return ERR_PTR(-ENOMEM); - - cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid; - cr->ram = e->pa | e->endian | e->elsz | e->mixed; - - return cr; -} - -static inline int omap2_cr_valid(struct cr_regs *cr) -{ - return cr->cam & MMU_CAM_V; -} - -static u32 omap2_get_pte_attr(struct iotlb_entry *e) -{ - u32 attr; - - attr = e->mixed << 5; - attr |= e->endian; - attr |= e->elsz >> 3; - attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) || - (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6); - return attr; -} - -static ssize_t -omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf) -{ - char *p = buf; - - /* FIXME: Need more detail analysis of cam/ram */ - p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram, - (cr->cam & MMU_CAM_P) ? 
1 : 0); - - return p - buf; -} - -#define pr_reg(name) \ - do { \ - ssize_t bytes; \ - const char *str = "%20s: %08x\n"; \ - const int maxcol = 32; \ - bytes = snprintf(p, maxcol, str, __stringify(name), \ - iommu_read_reg(obj, MMU_##name)); \ - p += bytes; \ - len -= bytes; \ - if (len < maxcol) \ - goto out; \ - } while (0) - -static ssize_t -omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len) -{ - char *p = buf; - - pr_reg(REVISION); - pr_reg(IRQSTATUS); - pr_reg(IRQENABLE); - pr_reg(WALKING_ST); - pr_reg(CNTL); - pr_reg(FAULT_AD); - pr_reg(TTB); - pr_reg(LOCK); - pr_reg(LD_TLB); - pr_reg(CAM); - pr_reg(RAM); - pr_reg(GFLUSH); - pr_reg(FLUSH_ENTRY); - pr_reg(READ_CAM); - pr_reg(READ_RAM); - pr_reg(EMU_FAULT_AD); -out: - return p - buf; -} - -static void omap2_iommu_save_ctx(struct omap_iommu *obj) -{ - int i; - u32 *p = obj->ctx; - - for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { - p[i] = iommu_read_reg(obj, i * sizeof(u32)); - dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); - } -} - -static void omap2_iommu_restore_ctx(struct omap_iommu *obj) -{ - int i; - u32 *p = obj->ctx; - - for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { - iommu_write_reg(obj, p[i], i * sizeof(u32)); - dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); - } -} - -static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) -{ - e->da = cr->cam & MMU_CAM_VATAG_MASK; - e->pa = cr->ram & MMU_RAM_PADDR_MASK; - e->valid = cr->cam & MMU_CAM_V; - e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK; - e->endian = cr->ram & MMU_RAM_ENDIAN_MASK; - e->elsz = cr->ram & MMU_RAM_ELSZ_MASK; - e->mixed = cr->ram & MMU_RAM_MIXED; -} - -static const struct iommu_functions omap2_iommu_ops = { - .enable = omap2_iommu_enable, - .disable = omap2_iommu_disable, - .set_twl = omap2_iommu_set_twl, - .fault_isr = omap2_iommu_fault_isr, - - .tlb_read_cr = omap2_tlb_read_cr, - .tlb_load_cr = omap2_tlb_load_cr, - - .cr_to_e = omap2_cr_to_e, - .cr_to_virt = omap2_cr_to_virt, - .alloc_cr = omap2_alloc_cr, - .cr_valid = omap2_cr_valid, - .dump_cr = omap2_dump_cr, - - .get_pte_attr = omap2_get_pte_attr, - - .save_ctx = omap2_iommu_save_ctx, - .restore_ctx = omap2_iommu_restore_ctx, - .dump_ctx = omap2_iommu_dump_ctx, -}; - -static int __init omap2_iommu_init(void) -{ - return omap_install_iommu_arch(&omap2_iommu_ops); -} -module_init(omap2_iommu_init); - -static void __exit omap2_iommu_exit(void) -{ - omap_uninstall_iommu_arch(&omap2_iommu_ops); -} -module_exit(omap2_iommu_exit); - -MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi"); -MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions"); -MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From 68570a744631ab4f2d378c9d7ef794e66e623a81 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:28 -0500 Subject: iommu/omap: Fix the permissions on nr_tlb_entries The permissions on the debugfs entry "nr_tlb_entries" should have been octal, not decimal, so fix it. 
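(To see what the bug did in practice: C integer literals without a leading zero are decimal, and mode arguments are plain integers, so 400 is really octal 0620 (owner rw-, group -w-, other ---) rather than the intended read-only 0400 (owner r--). The corrected call, as in the diff below:)

    /* 400 (decimal) == 0620 (octal): accidentally owner- and group-writable.
     * 0400 (octal) is the intended owner-read-only mode. */
    d = debugfs_create_u8("nr_tlb_entries", 0400, parent,
                          (u8 *)&obj->nr_tlb_entries);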
Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 0fb92aa..a520438 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -256,7 +256,7 @@ static int iommu_debug_register(struct device *dev, void *data) goto nomem; parent = d; - d = debugfs_create_u8("nr_tlb_entries", 400, parent, + d = debugfs_create_u8("nr_tlb_entries", 0400, parent, (u8 *)&obj->nr_tlb_entries); if (!d) goto nomem; -- cgit v0.10.2 From 3ca5db072c770c950a816c46ef2330d9e0ced8a7 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:29 -0500 Subject: iommu/omap: Make pagetable debugfs entry read-only Remove the writeability on the 'pagetable' debugfs entry, so that the mapping/unmapping into an OMAP IOMMU is only limited to actual client devices/drivers at kernel-level. Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index a520438..28de657 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -24,8 +24,6 @@ #include "omap-iopgtable.h" #include "omap-iommu.h" -#define MAXCOLUMN 100 /* for short messages */ - static DEFINE_MUTEX(iommu_debug_lock); static struct dentry *iommu_debug_root; @@ -82,39 +80,6 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, return bytes; } -static ssize_t debug_write_pagetable(struct file *file, - const char __user *userbuf, size_t count, loff_t *ppos) -{ - struct iotlb_entry e; - struct cr_regs cr; - int err; - struct device *dev = file->private_data; - struct omap_iommu *obj = dev_to_omap_iommu(dev); - char buf[MAXCOLUMN], *p = buf; - - count = min(count, sizeof(buf)); - - mutex_lock(&iommu_debug_lock); - if (copy_from_user(p, userbuf, count)) { - mutex_unlock(&iommu_debug_lock); - return -EFAULT; - } - - sscanf(p, "%x %x", &cr.cam, &cr.ram); - if (!cr.cam || !cr.ram) { - mutex_unlock(&iommu_debug_lock); - return -EINVAL; - } - - omap_iotlb_cr_to_e(&cr, &e); - err = omap_iopgtable_store_entry(obj, &e); - if (err) - dev_err(obj->dev, "%s: fail to store cr\n", __func__); - - mutex_unlock(&iommu_debug_lock); - return count; -} - #define dump_ioptable_entry_one(lv, da, val) \ ({ \ int __err = 0; \ @@ -202,14 +167,6 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, return bytes; } -#define DEBUG_FOPS(name) \ - static const struct file_operations debug_##name##_fops = { \ - .open = simple_open, \ - .read = debug_read_##name, \ - .write = debug_write_##name, \ - .llseek = generic_file_llseek, \ - }; - #define DEBUG_FOPS_RO(name) \ static const struct file_operations debug_##name##_fops = { \ .open = simple_open, \ @@ -219,7 +176,7 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, DEBUG_FOPS_RO(regs); DEBUG_FOPS_RO(tlb); -DEBUG_FOPS(pagetable); +DEBUG_FOPS_RO(pagetable); #define __DEBUG_ADD_FILE(attr, mode) \ { \ @@ -230,7 +187,6 @@ DEBUG_FOPS(pagetable); return -ENOMEM; \ } -#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600) #define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400) static int iommu_debug_register(struct device *dev, void *data) @@ -263,7 +219,7 @@ static int iommu_debug_register(struct device *dev, void *data) DEBUG_ADD_FILE_RO(regs); DEBUG_ADD_FILE_RO(tlb); - DEBUG_ADD_FILE(pagetable); + DEBUG_ADD_FILE_RO(pagetable); return 0; -- cgit v0.10.2 From 
61c753526dc3ef91a0601e0bf2bdeeb6c415e747 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:30 -0500 Subject: iommu/omap: Integrate omap-iommu-debug into omap-iommu The debugfs support for OMAP IOMMU is currently implemented as a module, warranting certain OMAP-specific IOMMU API to be exported. The OMAP IOMMU, when enabled, can only be built-in into the kernel, so integrate the OMAP IOMMU debug module into the OMAP IOMMU driver. This helps in eliminating the need to export most of the current OMAP IOMMU API. The following are the main changes: - The debugfs directory and entry creation logic is reversed, the calls are invoked by the OMAP IOMMU driver now. - The current iffy circular logic of adding IOMMU archdata to the IOMMU devices itself to get a pointer to the omap_iommu object in the debugfs support code is replaced by directly using the omap_iommu structure while creating the debugfs entries. - The debugfs root directory is renamed from the generic name "iommu" to a specific name "omap_iommu". - Unneeded headers have also been cleaned up while at this. - There will no longer be a omap-iommu-debug.ko module after this patch. - The OMAP_IOMMU_DEBUG Kconfig option is converted to boolean only, the OMAP IOMMU debugfs support is built alongside the OMAP IOMMU driver only when this option is enabled. Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index dd51122..1d54996 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -144,13 +144,13 @@ config OMAP_IOMMU select IOMMU_API config OMAP_IOMMU_DEBUG - tristate "Export OMAP IOMMU internals in DebugFS" - depends on OMAP_IOMMU && DEBUG_FS - help - Select this to see extensive information about - the internal state of OMAP IOMMU in debugfs. + bool "Export OMAP IOMMU internals in DebugFS" + depends on OMAP_IOMMU && DEBUG_FS + ---help--- + Select this to see extensive information about + the internal state of OMAP IOMMU in debugfs. - Say N unless you know you need this. + Say N unless you know you need this. config TEGRA_IOMMU_GART bool "Tegra GART IOMMU Support" diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 28de657..4813d3a 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -10,15 +10,11 @@ * published by the Free Software Foundation. 
*/ -#include #include -#include #include #include #include -#include #include -#include #include #include "omap-iopgtable.h" @@ -31,8 +27,7 @@ static struct dentry *iommu_debug_root; static ssize_t debug_read_regs(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - struct device *dev = file->private_data; - struct omap_iommu *obj = dev_to_omap_iommu(dev); + struct omap_iommu *obj = file->private_data; char *p, *buf; ssize_t bytes; @@ -55,8 +50,7 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - struct device *dev = file->private_data; - struct omap_iommu *obj = dev_to_omap_iommu(dev); + struct omap_iommu *obj = file->private_data; char *p, *buf; ssize_t bytes, rest; @@ -141,8 +135,7 @@ out: static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - struct device *dev = file->private_data; - struct omap_iommu *obj = dev_to_omap_iommu(dev); + struct omap_iommu *obj = file->private_data; char *p, *buf; size_t bytes; @@ -181,93 +174,56 @@ DEBUG_FOPS_RO(pagetable); #define __DEBUG_ADD_FILE(attr, mode) \ { \ struct dentry *dent; \ - dent = debugfs_create_file(#attr, mode, parent, \ - dev, &debug_##attr##_fops); \ + dent = debugfs_create_file(#attr, mode, obj->debug_dir, \ + obj, &debug_##attr##_fops); \ if (!dent) \ - return -ENOMEM; \ + goto err; \ } #define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400) -static int iommu_debug_register(struct device *dev, void *data) +void omap_iommu_debugfs_add(struct omap_iommu *obj) { - struct platform_device *pdev = to_platform_device(dev); - struct omap_iommu *obj = platform_get_drvdata(pdev); - struct omap_iommu_arch_data *arch_data; - struct dentry *d, *parent; - - if (!obj || !obj->dev) - return -EINVAL; - - arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL); - if (!arch_data) - return -ENOMEM; - - arch_data->iommu_dev = obj; + struct dentry *d; - dev->archdata.iommu = arch_data; + if (!iommu_debug_root) + return; - d = debugfs_create_dir(obj->name, iommu_debug_root); - if (!d) - goto nomem; - parent = d; + obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root); + if (!obj->debug_dir) + return; - d = debugfs_create_u8("nr_tlb_entries", 0400, parent, + d = debugfs_create_u8("nr_tlb_entries", 0400, obj->debug_dir, (u8 *)&obj->nr_tlb_entries); if (!d) - goto nomem; + return; DEBUG_ADD_FILE_RO(regs); DEBUG_ADD_FILE_RO(tlb); DEBUG_ADD_FILE_RO(pagetable); - return 0; + return; -nomem: - kfree(arch_data); - return -ENOMEM; +err: + debugfs_remove_recursive(obj->debug_dir); } -static int iommu_debug_unregister(struct device *dev, void *data) +void omap_iommu_debugfs_remove(struct omap_iommu *obj) { - if (!dev->archdata.iommu) - return 0; - - kfree(dev->archdata.iommu); - - dev->archdata.iommu = NULL; + if (!obj->debug_dir) + return; - return 0; + debugfs_remove_recursive(obj->debug_dir); } -static int __init iommu_debug_init(void) +void __init omap_iommu_debugfs_init(void) { - struct dentry *d; - int err; - - d = debugfs_create_dir("iommu", NULL); - if (!d) - return -ENOMEM; - iommu_debug_root = d; - - err = omap_foreach_iommu_device(d, iommu_debug_register); - if (err) - goto err_out; - return 0; - -err_out: - debugfs_remove_recursive(iommu_debug_root); - return err; + iommu_debug_root = debugfs_create_dir("omap_iommu", NULL); + if (!iommu_debug_root) + pr_err("can't create debugfs dir\n"); } -module_init(iommu_debug_init) -static void __exit 
iommu_debugfs_exit(void) +void __exit omap_iommu_debugfs_exit(void) { - debugfs_remove_recursive(iommu_debug_root); - omap_foreach_iommu_device(NULL, iommu_debug_unregister); + debugfs_remove(iommu_debug_root); } -module_exit(iommu_debugfs_exit) - -MODULE_DESCRIPTION("omap iommu: debugfs interface"); -MODULE_AUTHOR("Hiroshi DOYU "); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 91262fa..b92b6fc 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -472,7 +472,7 @@ static void flush_iotlb_all(struct omap_iommu *obj) pm_runtime_put_sync(obj->dev); } -#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) +#ifdef CONFIG_OMAP_IOMMU_DEBUG #define pr_reg(name) \ do { \ @@ -602,7 +602,7 @@ int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *)) } EXPORT_SYMBOL_GPL(omap_foreach_iommu_device); -#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */ +#endif /* CONFIG_OMAP_IOMMU_DEBUG */ /* * H/W pagetable operations @@ -1077,6 +1077,8 @@ static int omap_iommu_probe(struct platform_device *pdev) pm_runtime_irq_safe(obj->dev); pm_runtime_enable(obj->dev); + omap_iommu_debugfs_add(obj); + dev_info(&pdev->dev, "%s registered\n", obj->name); return 0; } @@ -1086,6 +1088,7 @@ static int omap_iommu_remove(struct platform_device *pdev) struct omap_iommu *obj = platform_get_drvdata(pdev); iopgtable_clear_entry_all(obj); + omap_iommu_debugfs_remove(obj); pm_runtime_disable(obj->dev); @@ -1403,6 +1406,8 @@ static int __init omap_iommu_init(void) bus_set_iommu(&platform_bus_type, &omap_iommu_ops); + omap_iommu_debugfs_init(); + return platform_driver_register(&omap_iommu_driver); } /* must be ready before omap3isp is probed */ @@ -1413,6 +1418,8 @@ static void __exit omap_iommu_exit(void) kmem_cache_destroy(iopte_cachep); platform_driver_unregister(&omap_iommu_driver); + + omap_iommu_debugfs_exit(); } module_exit(omap_iommu_exit); diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 0516e0e..4783779 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -30,6 +30,7 @@ struct omap_iommu { void __iomem *regbase; struct device *dev; struct iommu_domain *domain; + struct dentry *debug_dir; spinlock_t iommu_lock; /* global for this whole object */ @@ -197,11 +198,25 @@ omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e); extern int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *)); +#ifdef CONFIG_OMAP_IOMMU_DEBUG extern ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len); extern size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len); +void omap_iommu_debugfs_init(void); +void omap_iommu_debugfs_exit(void); + +void omap_iommu_debugfs_add(struct omap_iommu *obj); +void omap_iommu_debugfs_remove(struct omap_iommu *obj); +#else +static inline void omap_iommu_debugfs_init(void) { } +static inline void omap_iommu_debugfs_exit(void) { } + +static inline void omap_iommu_debugfs_add(struct omap_iommu *obj) { } +static inline void omap_iommu_debugfs_remove(struct omap_iommu *obj) { } +#endif + /* * register accessors */ -- cgit v0.10.2 From c55caec124512d8192ff21eb3c08df55c94f2db3 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:31 -0500 Subject: iommu/omap: Remove couple of unused exported functions The exported functions omap_foreach_iommu_device() and omap_iotlb_cr_to_e() have been deleted, as they are no longer needed. 
The function omap_foreach_iommu_device() is not required after the consolidation of the OMAP IOMMU debug module, and the function omap_iotlb_cr_to_e() is not required after making the debugfs entry 'pagetable' read-only. Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index b92b6fc..9171112 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -203,20 +203,6 @@ static void iommu_disable(struct omap_iommu *obj) /* * TLB operations */ -void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) -{ - BUG_ON(!cr || !e); - - e->da = cr->cam & MMU_CAM_VATAG_MASK; - e->pa = cr->ram & MMU_RAM_PADDR_MASK; - e->valid = cr->cam & MMU_CAM_V; - e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK; - e->endian = cr->ram & MMU_RAM_ENDIAN_MASK; - e->elsz = cr->ram & MMU_RAM_ELSZ_MASK; - e->mixed = cr->ram & MMU_RAM_MIXED; -} -EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e); - static inline int iotlb_cr_valid(struct cr_regs *cr) { if (!cr) @@ -595,13 +581,6 @@ size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes) } EXPORT_SYMBOL_GPL(omap_dump_tlb_entries); -int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *)) -{ - return driver_for_each_device(&omap_iommu_driver.driver, - NULL, data, fn); -} -EXPORT_SYMBOL_GPL(omap_foreach_iommu_device); - #endif /* CONFIG_OMAP_IOMMU_DEBUG */ /* diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 4783779..b18cecc 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -190,14 +190,9 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) /* * global functions */ -extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e); - extern int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e); -extern int omap_foreach_iommu_device(void *data, - int (*fn)(struct device *, void *)); - #ifdef CONFIG_OMAP_IOMMU_DEBUG extern ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len); -- cgit v0.10.2 From 4899a5636d535ba573a7e52b5ad7be1f886d40a3 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:32 -0500 Subject: iommu/omap: Do not export unneeded functions The following functions were exported previously for usage by the OMAP IOMMU debug module: omap_iommu_dump_ctx() omap_dump_tlb_entries() omap_iopgtable_store_entry() These functions need not be exported anymore as the OMAP IOMMU debugfs code is integrated with the OMAP IOMMU driver, and there won't be external users for these functions. So, remove the EXPORT_SYMBOL_GPL on these. The omap_iopgtable_store_entry() is also made internal only, after making the 'pagetable' debugfs entry read-only. 
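(A note on what EXPORT_SYMBOL_GPL() actually buys: it only makes a symbol resolvable from loadable modules; built-in code is linked against its callees directly at build time. A minimal illustration with generic names, not code from the patch:)

    int helper(void) { return 0; }
    EXPORT_SYMBOL_GPL(helper);   /* needed only so a .ko can call helper() */

    /* A built-in caller links against helper() whether or not it is
     * exported. With the debugfs code now built into the same image as
     * the driver, nothing outside it can call these functions, so the
     * exports are dead weight. */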
Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 9171112..3dcaef0 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -511,7 +511,6 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) return bytes; } -EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx); static int __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) @@ -579,7 +578,6 @@ size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes) return p - buf; } -EXPORT_SYMBOL_GPL(omap_dump_tlb_entries); #endif /* CONFIG_OMAP_IOMMU_DEBUG */ @@ -764,7 +762,8 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e) * @obj: target iommu * @e: an iommu tlb entry info **/ -int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) +static int +omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) { int err; @@ -774,7 +773,6 @@ int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) prefetch_iotlb_entry(obj, e); return err; } -EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry); /** * iopgtable_lookup_entry - Lookup an iommu pte entry diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index b18cecc..d736630 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -190,9 +190,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) /* * global functions */ -extern int -omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e); - #ifdef CONFIG_OMAP_IOMMU_DEBUG extern ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len); -- cgit v0.10.2 From f24d9ad3fafd7c4ee47ec75947657d768873b6e4 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:33 -0500 Subject: iommu/omap: Reset the domain field upon detaching The .domain field in omap_iommu struct is set properly when the OMAP IOMMU device is attached to, but is never reset properly on detach. Reset this properly so that the OMAP IOMMU debugfs logic can depend on this field before allowing the debugfs operations. Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 3dcaef0..2ba3219 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1206,6 +1206,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, omap_domain->iommu_dev = arch_data->iommu_dev = NULL; omap_domain->dev = NULL; + oiommu->domain = NULL; } static void omap_iommu_detach_dev(struct iommu_domain *domain, -- cgit v0.10.2 From c5cf5c5377fbd0a1ff2a09ad1f0c7e261aabc567 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:34 -0500 Subject: iommu/omap: Fix bus error on debugfs access of unattached IOMMU Any debugfs access on an OMAP IOMMU that is not enabled (done during attach) results in a bus error due to access of registers without the clock or the reset enabled for the respective IOMMU. So, add a check to make sure the IOMMU is enabled/attached by a client device. This gracefully prints a "Operation not permitted" trace when the corresponding IOMMU is not enabled. 
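(Distilled from the diff below: the guard is a single pointer test, and it is only reliable because the previous patch ensured the .domain field is cleared on detach:)

    static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
    {
        return !obj->domain;    /* set on attach, reset on detach */
    }

    /* each debugfs read handler then begins with: */
    if (is_omap_iommu_detached(obj))
        return -EPERM;          /* user sees "Operation not permitted" */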
Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 4813d3a..41b09a1 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -24,6 +24,11 @@ static DEFINE_MUTEX(iommu_debug_lock); static struct dentry *iommu_debug_root; +static inline bool is_omap_iommu_detached(struct omap_iommu *obj) +{ + return !obj->domain; +} + static ssize_t debug_read_regs(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { @@ -31,6 +36,9 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, char *p, *buf; ssize_t bytes; + if (is_omap_iommu_detached(obj)) + return -EPERM; + buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -54,6 +62,9 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, char *p, *buf; ssize_t bytes, rest; + if (is_omap_iommu_detached(obj)) + return -EPERM; + buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -139,6 +150,9 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, char *p, *buf; size_t bytes; + if (is_omap_iommu_detached(obj)) + return -EPERM; + buf = (char *)__get_free_page(GFP_KERNEL); if (!buf) return -ENOMEM; -- cgit v0.10.2 From 9c83e9f384f5d1513e42935af43c13a601aad842 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 22 Oct 2014 17:22:35 -0500 Subject: iommu/omap: Switch pagetable debugfs entry to use seq_file The debugfs entry 'pagetable' that shows the page table entry (PTE) data currently outputs only data that can be fit into a page. Switch the entry to use the seq_file interface so that it can show all the valid page table entries. The patch also corrected the output for L2 entries, and prints the proper L2 PTE instead of the previous L1 page descriptor pointer. 
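(Condensed from the diff below, the shape of the conversion: the show callback receives a struct seq_file, and the seq_* helpers take over all buffer sizing and paging, which is what lets every valid entry be emitted instead of only one page's worth; locking and the detach check are omitted here for brevity:)

    static int debug_read_pagetable(struct seq_file *s, void *data)
    {
        struct omap_iommu *obj = s->private;   /* from inode->i_private */

        seq_printf(s, "L: %8s %8s\n", "da:", "pte:");
        dump_ioptable(s);                      /* one seq_printf() per valid PTE */
        return 0;
    }

    static int debug_open_pagetable(struct inode *inode, struct file *file)
    {
        return single_open(file, debug_read_pagetable, inode->i_private);
    }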
Signed-off-by: Suman Anna Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 41b09a1..f3d20a2 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -85,95 +85,70 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, return bytes; } -#define dump_ioptable_entry_one(lv, da, val) \ - ({ \ - int __err = 0; \ - ssize_t bytes; \ - const int maxcol = 22; \ - const char *str = "%d: %08x %08x\n"; \ - bytes = snprintf(p, maxcol, str, lv, da, val); \ - p += bytes; \ - len -= bytes; \ - if (len < maxcol) \ - __err = -ENOMEM; \ - __err; \ - }) - -static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len) +static void dump_ioptable(struct seq_file *s) { - int i; - u32 *iopgd; - char *p = buf; + int i, j; + u32 da; + u32 *iopgd, *iopte; + struct omap_iommu *obj = s->private; spin_lock(&obj->page_table_lock); iopgd = iopgd_offset(obj, 0); for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) { - int j, err; - u32 *iopte; - u32 da; - if (!*iopgd) continue; if (!(*iopgd & IOPGD_TABLE)) { da = i << IOPGD_SHIFT; - - err = dump_ioptable_entry_one(1, da, *iopgd); - if (err) - goto out; + seq_printf(s, "1: 0x%08x 0x%08x\n", da, *iopgd); continue; } iopte = iopte_offset(iopgd, 0); - for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) { if (!*iopte) continue; da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT); - err = dump_ioptable_entry_one(2, da, *iopgd); - if (err) - goto out; + seq_printf(s, "2: 0x%08x 0x%08x\n", da, *iopte); } } -out: - spin_unlock(&obj->page_table_lock); - return p - buf; + spin_unlock(&obj->page_table_lock); } -static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) +static int debug_read_pagetable(struct seq_file *s, void *data) { - struct omap_iommu *obj = file->private_data; - char *p, *buf; - size_t bytes; + struct omap_iommu *obj = s->private; if (is_omap_iommu_detached(obj)) return -EPERM; - buf = (char *)__get_free_page(GFP_KERNEL); - if (!buf) - return -ENOMEM; - p = buf; - - p += sprintf(p, "L: %8s %8s\n", "da:", "pa:"); - p += sprintf(p, "-----------------------------------------\n"); - mutex_lock(&iommu_debug_lock); - bytes = PAGE_SIZE - (p - buf); - p += dump_ioptable(obj, p, bytes); - - bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + seq_printf(s, "L: %8s %8s\n", "da:", "pte:"); + seq_puts(s, "--------------------------\n"); + dump_ioptable(s); mutex_unlock(&iommu_debug_lock); - free_page((unsigned long)buf); - return bytes; + return 0; } +#define DEBUG_SEQ_FOPS_RO(name) \ + static int debug_open_##name(struct inode *inode, struct file *file) \ + { \ + return single_open(file, debug_read_##name, inode->i_private); \ + } \ + \ + static const struct file_operations debug_##name##_fops = { \ + .open = debug_open_##name, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + #define DEBUG_FOPS_RO(name) \ static const struct file_operations debug_##name##_fops = { \ .open = simple_open, \ @@ -183,7 +158,7 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, DEBUG_FOPS_RO(regs); DEBUG_FOPS_RO(tlb); -DEBUG_FOPS_RO(pagetable); +DEBUG_SEQ_FOPS_RO(pagetable); #define __DEBUG_ADD_FILE(attr, mode) \ { \ -- cgit v0.10.2 From c72acf69e2c0d884023296d0cceb16f3e75c09d9 Mon Sep 17 00:00:00 2001 From: SF Markus Elfring Date: Wed, 22 Oct 2014 20:00:17 +0200 Subject: iommu/msm: Deletion of unnecessary checks before clk_disable() 
A semantic patch approach was proposed with the subject "[PATCH with Coccinelle?] Deletion of unnecessary checks before specific function calls" on 2014-03-05. https://lkml.org/lkml/2014/3/5/344 http://article.gmane.org/gmane.comp.version-control.coccinelle/3513/ This patch pattern was applied with the help of Coccinelle 1.0.0-rc22 to the source files of Linux 3.17.1; an extract of the automatically generated update suggestions is shown here. The affected call sites invoke functions that already validate their input parameters, so repeating a similar safety check at the call site is not needed. Signed-off-by: Markus Elfring Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 6e3dcc28..3e4d888 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -73,8 +73,7 @@ fail: static void __disable_clocks(struct msm_iommu_drvdata *drvdata) { - if (drvdata->clk) - clk_disable(drvdata->clk); + clk_disable(drvdata->clk); clk_disable(drvdata->pclk); } diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c index 61def7cb..9574d21 100644 --- a/drivers/iommu/msm_iommu_dev.c +++ b/drivers/iommu/msm_iommu_dev.c @@ -224,8 +224,7 @@ static int msm_iommu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, drvdata); - if (iommu_clk) - clk_disable(iommu_clk); + clk_disable(iommu_clk); clk_disable(iommu_pclk); @@ -323,8 +322,7 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev) SET_NSCFG(drvdata->base, mid, 3); } - if (drvdata->clk) - clk_disable(drvdata->clk); + clk_disable(drvdata->clk); clk_disable(drvdata->pclk); dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num); -- cgit v0.10.2 From c68a292152d322b2d03f88d7f1d91d6bfa9d22bc Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Mon, 3 Nov 2014 10:53:27 +0800 Subject: iommu/rockchip: rk3288 iommu driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The rk3288 has several iommus. Each iommu belongs to a single master device. There is one device (ISP) that has two slave iommus, but that case is not yet supported by this driver. At subsys init, the iommu driver registers itself as the iommu driver for the platform bus. The master devices find their slave iommus using the "iommus" field in their devicetree description. Since each slave iommu belongs to exactly one master, there is no additional data needed at probe to associate a slave with its master. An iommu device's power domain, clock and irq are all shared with its master device, and the master device must be careful to attach to the iommu only after powering and clocking it (and to leave it powered and clocked until after detaching). Because there is no guarantee what the status of the iommu is at probe, and since the driver does not even know if the device is powered, we delay requesting its irq until the master device attaches, at which point we have a guarantee that the device is powered and clocked and we can reset it and disable its interrupt mask. An iommu_domain describes a virtual iova address space. Each iommu_domain has a corresponding page table that lists the mappings from iova to physical address. For the rk3288 iommu, the page table has two levels: The Level 1 "directory_table" has 1024 4-byte dte entries. Each dte points to a level 2 "page_table". Each level 2 page_table has 1024 4-byte pte entries. Each pte points to a 4 KiB page of memory.
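As a quick aid for the layout just described, this is how a 32-bit iova splits into indices (an illustrative sketch; the macro names are invented, and the driver's rk_iova_*() helpers further below do the same arithmetic):

/* 1024-entry DT, 1024-entry PTs, 4 KiB pages: 10 + 10 + 12 bits.
 * One DTE therefore spans 4 MiB, and the DT covers the 4 GiB iova space. */
#define RK_DTE_INDEX(iova)	(((iova) >> 22) & 0x3ff)	/* bits 31:22 */
#define RK_PTE_INDEX(iova)	(((iova) >> 12) & 0x3ff)	/* bits 21:12 */
#define RK_PAGE_OFFSET(iova)	((iova) & 0xfff)		/* bits 11:0  */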
An iommu_domain is created when a dma_iommu_mapping is created via arm_iommu_create_mapping. Master devices can then attach themselves to this mapping (or attach the mapping to themselves?) by calling arm_iommu_attach_device(). This in turn instructs the iommu driver to write the page table's physical address into the slave iommu's "Directory Table Entry" (DTE) register. In fact multiple master devices, each with their own slave iommu device, can all attach to the same mapping. The iommus for these devices will share the same iommu_domain and therefore point to the same page table. Thus, the iommu domain maintains a list of iommu devices which are attached. This driver relies on the iommu core to ensure that all devices have detached before destroying a domain. v6: - add .add/remove_device() callbacks. - parse platform_device device tree nodes for "iommus" property - store platform device pointer as group iommudata - Check for existence of iommu group instead of relying on a dev_get_drvdata() to return NULL for a NULL device. v7: - fixup some strings. - In rk_iommu_disable_paging() # and % were reversed. Signed-off-by: Daniel Kurtz Signed-off-by: Simon Xue Reviewed-by: Grant Grundler Reviewed-by: Stéphane Marchesin Tested-by: Heiko Stuebner Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index dd51122..d0a1261 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -152,6 +152,18 @@ config OMAP_IOMMU_DEBUG Say N unless you know you need this. +config ROCKCHIP_IOMMU + bool "Rockchip IOMMU Support" + depends on ARCH_ROCKCHIP + select IOMMU_API + select ARM_DMA_USE_IOMMU + help + Support for IOMMUs found on Rockchip rk32xx SOCs. + These IOMMUs allow virtualization of the address space used by most + cores within the multimedia subsystem. + Say Y here if you are using a Rockchip SoC that includes an IOMMU + device. + config TEGRA_IOMMU_GART bool "Tegra GART IOMMU Support" depends on ARCH_TEGRA_2x_SOC diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 16edef7..3e47ef3 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o +obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c new file mode 100644 index 0000000..b2023af --- /dev/null +++ b/drivers/iommu/rockchip-iommu.c @@ -0,0 +1,1038 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** MMU register offsets */ +#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */ +#define RK_MMU_STATUS 0x04 +#define RK_MMU_COMMAND 0x08 +#define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */ +#define RK_MMU_ZAP_ONE_LINE 0x10 /* Shootdown one IOTLB entry */ +#define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */ +#define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */ +#define RK_MMU_INT_MASK 0x1C /* IRQ enable */ +#define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */ +#define RK_MMU_AUTO_GATING 0x24 + +#define DTE_ADDR_DUMMY 0xCAFEBABE +#define FORCE_RESET_TIMEOUT 100 /* ms */ + +/* RK_MMU_STATUS fields */ +#define RK_MMU_STATUS_PAGING_ENABLED BIT(0) +#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1) +#define RK_MMU_STATUS_STALL_ACTIVE BIT(2) +#define RK_MMU_STATUS_IDLE BIT(3) +#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4) +#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5) +#define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31) + +/* RK_MMU_COMMAND command values */ +#define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */ +#define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */ +#define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */ +#define RK_MMU_CMD_DISABLE_STALL 3 /* Stop stall re-enables paging */ +#define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */ +#define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */ +#define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */ + +/* RK_MMU_INT_* register fields */ +#define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */ +#define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */ +#define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR) + +#define NUM_DT_ENTRIES 1024 +#define NUM_PT_ENTRIES 1024 + +#define SPAGE_ORDER 12 +#define SPAGE_SIZE (1 << SPAGE_ORDER) + + /* + * Support mapping any size that fits in one page table: + * 4 KiB to 4 MiB + */ +#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000 + +#define IOMMU_REG_POLL_COUNT_FAST 1000 + +struct rk_iommu_domain { + struct list_head iommus; + u32 *dt; /* page directory table */ + spinlock_t iommus_lock; /* lock for iommus list */ + spinlock_t dt_lock; /* lock for modifying page directory table */ +}; + +struct rk_iommu { + struct device *dev; + void __iomem *base; + int irq; + struct list_head node; /* entry in rk_iommu_domain.iommus */ + struct iommu_domain *domain; /* domain to which iommu is attached */ +}; + +static inline void rk_table_flush(u32 *va, unsigned int count) +{ + phys_addr_t pa_start = virt_to_phys(va); + phys_addr_t pa_end = virt_to_phys(va + count); + size_t size = pa_end - pa_start; + + __cpuc_flush_dcache_area(va, size); + outer_flush_range(pa_start, pa_end); +} + +/** + * Inspired by _wait_for in intel_drv.h + * This is NOT safe for use in interrupt context. + * + * Note that it's important that we check the condition again after having + * timed out, since the timeout could be due to preemption or similar and + * we've never had a chance to check the condition before the timeout. + */ +#define rk_wait_for(COND, MS) ({ \ + unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ + int ret__ = 0; \ + while (!(COND)) { \ + if (time_after(jiffies, timeout__)) { \ + ret__ = (COND) ? 
0 : -ETIMEDOUT; \ + break; \ + } \ + usleep_range(50, 100); \ + } \ + ret__; \ +}) + +/* + * The Rockchip rk3288 iommu uses a 2-level page table. + * The first level is the "Directory Table" (DT). + * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing + * to a "Page Table". + * The second level is the 1024 Page Tables (PT). + * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to + * a 4 KB page of physical memory. + * + * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries). + * Each iommu device has a MMU_DTE_ADDR register that contains the physical + * address of the start of the DT page. + * + * The structure of the page table is as follows: + * + * DT + * MMU_DTE_ADDR -> +-----+ + * | | + * +-----+ PT + * | DTE | -> +-----+ + * +-----+ | | Memory + * | | +-----+ Page + * | | | PTE | -> +-----+ + * +-----+ +-----+ | | + * | | | | + * | | | | + * +-----+ | | + * | | + * | | + * +-----+ + */ + +/* + * Each DTE has a PT address and a valid bit: + * +---------------------+-----------+-+ + * | PT address | Reserved |V| + * +---------------------+-----------+-+ + * 31:12 - PT address (PTs always starts on a 4 KB boundary) + * 11: 1 - Reserved + * 0 - 1 if PT @ PT address is valid + */ +#define RK_DTE_PT_ADDRESS_MASK 0xfffff000 +#define RK_DTE_PT_VALID BIT(0) + +static inline phys_addr_t rk_dte_pt_address(u32 dte) +{ + return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK; +} + +static inline bool rk_dte_is_pt_valid(u32 dte) +{ + return dte & RK_DTE_PT_VALID; +} + +static u32 rk_mk_dte(u32 *pt) +{ + phys_addr_t pt_phys = virt_to_phys(pt); + return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID; +} + +/* + * Each PTE has a Page address, some flags and a valid bit: + * +---------------------+---+-------+-+ + * | Page address |Rsv| Flags |V| + * +---------------------+---+-------+-+ + * 31:12 - Page address (Pages always start on a 4 KB boundary) + * 11: 9 - Reserved + * 8: 1 - Flags + * 8 - Read allocate - allocate cache space on read misses + * 7 - Read cache - enable cache & prefetch of data + * 6 - Write buffer - enable delaying writes on their way to memory + * 5 - Write allocate - allocate cache space on write misses + * 4 - Write cache - different writes can be merged together + * 3 - Override cache attributes + * if 1, bits 4-8 control cache attributes + * if 0, the system bus defaults are used + * 2 - Writable + * 1 - Readable + * 0 - 1 if Page @ Page address is valid + */ +#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000 +#define RK_PTE_PAGE_FLAGS_MASK 0x000001fe +#define RK_PTE_PAGE_WRITABLE BIT(2) +#define RK_PTE_PAGE_READABLE BIT(1) +#define RK_PTE_PAGE_VALID BIT(0) + +static inline phys_addr_t rk_pte_page_address(u32 pte) +{ + return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK; +} + +static inline bool rk_pte_is_page_valid(u32 pte) +{ + return pte & RK_PTE_PAGE_VALID; +} + +/* TODO: set cache flags per prot IOMMU_CACHE */ +static u32 rk_mk_pte(phys_addr_t page, int prot) +{ + u32 flags = 0; + flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0; + flags |= (prot & IOMMU_WRITE) ? 
RK_PTE_PAGE_WRITABLE : 0; + page &= RK_PTE_PAGE_ADDRESS_MASK; + return page | flags | RK_PTE_PAGE_VALID; +} + +static u32 rk_mk_pte_invalid(u32 pte) +{ + return pte & ~RK_PTE_PAGE_VALID; +} + +/* + * rk3288 iova (IOMMU Virtual Address) format + * 31 22.21 12.11 0 + * +-----------+-----------+-------------+ + * | DTE index | PTE index | Page offset | + * +-----------+-----------+-------------+ + * 31:22 - DTE index - index of DTE in DT + * 21:12 - PTE index - index of PTE in PT @ DTE.pt_address + * 11: 0 - Page offset - offset into page @ PTE.page_address + */ +#define RK_IOVA_DTE_MASK 0xffc00000 +#define RK_IOVA_DTE_SHIFT 22 +#define RK_IOVA_PTE_MASK 0x003ff000 +#define RK_IOVA_PTE_SHIFT 12 +#define RK_IOVA_PAGE_MASK 0x00000fff +#define RK_IOVA_PAGE_SHIFT 0 + +static u32 rk_iova_dte_index(dma_addr_t iova) +{ + return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT; +} + +static u32 rk_iova_pte_index(dma_addr_t iova) +{ + return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT; +} + +static u32 rk_iova_page_offset(dma_addr_t iova) +{ + return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT; +} + +static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset) +{ + return readl(iommu->base + offset); +} + +static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value) +{ + writel(value, iommu->base + offset); +} + +static void rk_iommu_command(struct rk_iommu *iommu, u32 command) +{ + writel(command, iommu->base + RK_MMU_COMMAND); +} + +static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova, + size_t size) +{ + dma_addr_t iova_end = iova + size; + /* + * TODO(djkurtz): Figure out when it is more efficient to shootdown the + * entire iotlb rather than iterate over individual iovas. + */ + for (; iova < iova_end; iova += SPAGE_SIZE) + rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova); +} + +static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) +{ + return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE; +} + +static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu) +{ + return rk_iommu_read(iommu, RK_MMU_STATUS) & + RK_MMU_STATUS_PAGING_ENABLED; +} + +static int rk_iommu_enable_stall(struct rk_iommu *iommu) +{ + int ret; + + if (rk_iommu_is_stall_active(iommu)) + return 0; + + /* Stall can only be enabled if paging is enabled */ + if (!rk_iommu_is_paging_enabled(iommu)) + return 0; + + rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL); + + ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1); + if (ret) + dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n", + rk_iommu_read(iommu, RK_MMU_STATUS)); + + return ret; +} + +static int rk_iommu_disable_stall(struct rk_iommu *iommu) +{ + int ret; + + if (!rk_iommu_is_stall_active(iommu)) + return 0; + + rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL); + + ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1); + if (ret) + dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n", + rk_iommu_read(iommu, RK_MMU_STATUS)); + + return ret; +} + +static int rk_iommu_enable_paging(struct rk_iommu *iommu) +{ + int ret; + + if (rk_iommu_is_paging_enabled(iommu)) + return 0; + + rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING); + + ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1); + if (ret) + dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n", + rk_iommu_read(iommu, RK_MMU_STATUS)); + + return ret; +} + +static int rk_iommu_disable_paging(struct rk_iommu *iommu) +{ + int ret; + + if (!rk_iommu_is_paging_enabled(iommu)) + return 0; + + 
rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING); + + ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1); + if (ret) + dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n", + rk_iommu_read(iommu, RK_MMU_STATUS)); + + return ret; +} + +static int rk_iommu_force_reset(struct rk_iommu *iommu) +{ + int ret; + u32 dte_addr; + + /* + * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY + * and verifying that upper 5 nybbles are read back. + */ + rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY); + + dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR); + if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) { + dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n"); + return -EFAULT; + } + + rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET); + + ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000, + FORCE_RESET_TIMEOUT); + if (ret) + dev_err(iommu->dev, "FORCE_RESET command timed out\n"); + + return ret; +} + +static void log_iova(struct rk_iommu *iommu, dma_addr_t iova) +{ + u32 dte_index, pte_index, page_offset; + u32 mmu_dte_addr; + phys_addr_t mmu_dte_addr_phys, dte_addr_phys; + u32 *dte_addr; + u32 dte; + phys_addr_t pte_addr_phys = 0; + u32 *pte_addr = NULL; + u32 pte = 0; + phys_addr_t page_addr_phys = 0; + u32 page_flags = 0; + + dte_index = rk_iova_dte_index(iova); + pte_index = rk_iova_pte_index(iova); + page_offset = rk_iova_page_offset(iova); + + mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR); + mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr; + + dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index); + dte_addr = phys_to_virt(dte_addr_phys); + dte = *dte_addr; + + if (!rk_dte_is_pt_valid(dte)) + goto print_it; + + pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4); + pte_addr = phys_to_virt(pte_addr_phys); + pte = *pte_addr; + + if (!rk_pte_is_page_valid(pte)) + goto print_it; + + page_addr_phys = rk_pte_page_address(pte) + page_offset; + page_flags = pte & RK_PTE_PAGE_FLAGS_MASK; + +print_it: + dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n", + &iova, dte_index, pte_index, page_offset); + dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n", + &mmu_dte_addr_phys, &dte_addr_phys, dte, + rk_dte_is_pt_valid(dte), &pte_addr_phys, pte, + rk_pte_is_page_valid(pte), &page_addr_phys, page_flags); +} + +static irqreturn_t rk_iommu_irq(int irq, void *dev_id) +{ + struct rk_iommu *iommu = dev_id; + u32 status; + u32 int_status; + dma_addr_t iova; + + int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS); + if (int_status == 0) + return IRQ_NONE; + + iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR); + + if (int_status & RK_MMU_IRQ_PAGE_FAULT) { + int flags; + + status = rk_iommu_read(iommu, RK_MMU_STATUS); + flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ? + IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; + + dev_err(iommu->dev, "Page fault at %pad of type %s\n", + &iova, + (flags == IOMMU_FAULT_WRITE) ? "write" : "read"); + + log_iova(iommu, iova); + + /* + * Report page fault to any installed handlers. + * Ignore the return code, though, since we always zap cache + * and clear the page fault anyway. 
+ */ + if (iommu->domain) + report_iommu_fault(iommu->domain, iommu->dev, iova, + flags); + else + dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n"); + + rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE); + rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE); + } + + if (int_status & RK_MMU_IRQ_BUS_ERROR) + dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova); + + if (int_status & ~RK_MMU_IRQ_MASK) + dev_err(iommu->dev, "unexpected int_status: %#08x\n", + int_status); + + rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status); + + return IRQ_HANDLED; +} + +static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) +{ + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + phys_addr_t pt_phys, phys = 0; + u32 dte, pte; + u32 *page_table; + + spin_lock_irqsave(&rk_domain->dt_lock, flags); + + dte = rk_domain->dt[rk_iova_dte_index(iova)]; + if (!rk_dte_is_pt_valid(dte)) + goto out; + + pt_phys = rk_dte_pt_address(dte); + page_table = (u32 *)phys_to_virt(pt_phys); + pte = page_table[rk_iova_pte_index(iova)]; + if (!rk_pte_is_page_valid(pte)) + goto out; + + phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova); +out: + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + + return phys; +} + +static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, + dma_addr_t iova, size_t size) +{ + struct list_head *pos; + unsigned long flags; + + /* shootdown these iova from all iommus using this domain */ + spin_lock_irqsave(&rk_domain->iommus_lock, flags); + list_for_each(pos, &rk_domain->iommus) { + struct rk_iommu *iommu; + iommu = list_entry(pos, struct rk_iommu, node); + rk_iommu_zap_lines(iommu, iova, size); + } + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); +} + +static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, + dma_addr_t iova) +{ + u32 *page_table, *dte_addr; + u32 dte; + phys_addr_t pt_phys; + + assert_spin_locked(&rk_domain->dt_lock); + + dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)]; + dte = *dte_addr; + if (rk_dte_is_pt_valid(dte)) + goto done; + + page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); + if (!page_table) + return ERR_PTR(-ENOMEM); + + dte = rk_mk_dte(page_table); + *dte_addr = dte; + + rk_table_flush(page_table, NUM_PT_ENTRIES); + rk_table_flush(dte_addr, 1); + + /* + * Zap the first iova of newly allocated page table so iommu evicts + * old cached value of new dte from the iotlb. 
+ */ + rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE); + +done: + pt_phys = rk_dte_pt_address(dte); + return (u32 *)phys_to_virt(pt_phys); +} + +static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, + u32 *pte_addr, dma_addr_t iova, size_t size) +{ + unsigned int pte_count; + unsigned int pte_total = size / SPAGE_SIZE; + + assert_spin_locked(&rk_domain->dt_lock); + + for (pte_count = 0; pte_count < pte_total; pte_count++) { + u32 pte = pte_addr[pte_count]; + if (!rk_pte_is_page_valid(pte)) + break; + + pte_addr[pte_count] = rk_mk_pte_invalid(pte); + } + + rk_table_flush(pte_addr, pte_count); + + return pte_count * SPAGE_SIZE; +} + +static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, + dma_addr_t iova, phys_addr_t paddr, size_t size, + int prot) +{ + unsigned int pte_count; + unsigned int pte_total = size / SPAGE_SIZE; + phys_addr_t page_phys; + + assert_spin_locked(&rk_domain->dt_lock); + + for (pte_count = 0; pte_count < pte_total; pte_count++) { + u32 pte = pte_addr[pte_count]; + + if (rk_pte_is_page_valid(pte)) + goto unwind; + + pte_addr[pte_count] = rk_mk_pte(paddr, prot); + + paddr += SPAGE_SIZE; + } + + rk_table_flush(pte_addr, pte_count); + + return 0; +unwind: + /* Unmap the range of iovas that we just mapped */ + rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE); + + iova += pte_count * SPAGE_SIZE; + page_phys = rk_pte_page_address(pte_addr[pte_count]); + pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n", + &iova, &page_phys, &paddr, prot); + + return -EADDRINUSE; +} + +static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, + phys_addr_t paddr, size_t size, int prot) +{ + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + dma_addr_t iova = (dma_addr_t)_iova; + u32 *page_table, *pte_addr; + int ret; + + spin_lock_irqsave(&rk_domain->dt_lock, flags); + + /* + * pgsize_bitmap specifies iova sizes that fit in one page table + * (1024 4-KiB pages = 4 MiB). + * So, size will always be 4096 <= size <= 4194304. + * Since iommu_map() guarantees that both iova and size will be + * aligned, we will always only be mapping from a single dte here. + */ + page_table = rk_dte_get_page_table(rk_domain, iova); + if (IS_ERR(page_table)) { + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + return PTR_ERR(page_table); + } + + pte_addr = &page_table[rk_iova_pte_index(iova)]; + ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot); + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + + return ret; +} + +static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, + size_t size) +{ + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + dma_addr_t iova = (dma_addr_t)_iova; + phys_addr_t pt_phys; + u32 dte; + u32 *pte_addr; + size_t unmap_size; + + spin_lock_irqsave(&rk_domain->dt_lock, flags); + + /* + * pgsize_bitmap specifies iova sizes that fit in one page table + * (1024 4-KiB pages = 4 MiB). + * So, size will always be 4096 <= size <= 4194304. + * Since iommu_unmap() guarantees that both iova and size will be + * aligned, we will always only be unmapping from a single dte here. 
+ */ + dte = rk_domain->dt[rk_iova_dte_index(iova)]; + /* Just return 0 if iova is unmapped */ + if (!rk_dte_is_pt_valid(dte)) { + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + return 0; + } + + pt_phys = rk_dte_pt_address(dte); + pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova); + unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size); + + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + + /* Shootdown iotlb entries for iova range that was just unmapped */ + rk_iommu_zap_iova(rk_domain, iova, unmap_size); + + return unmap_size; +} + +static struct rk_iommu *rk_iommu_from_dev(struct device *dev) +{ + struct iommu_group *group; + struct device *iommu_dev; + struct rk_iommu *rk_iommu; + + group = iommu_group_get(dev); + if (!group) + return NULL; + iommu_dev = iommu_group_get_iommudata(group); + rk_iommu = dev_get_drvdata(iommu_dev); + iommu_group_put(group); + + return rk_iommu; +} + +static int rk_iommu_attach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct rk_iommu *iommu; + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + int ret; + phys_addr_t dte_addr; + + /* + * Allow 'virtual devices' (e.g., drm) to attach to domain. + * Such a device does not belong to an iommu group. + */ + iommu = rk_iommu_from_dev(dev); + if (!iommu) + return 0; + + ret = rk_iommu_enable_stall(iommu); + if (ret) + return ret; + + ret = rk_iommu_force_reset(iommu); + if (ret) + return ret; + + iommu->domain = domain; + + ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq, + IRQF_SHARED, dev_name(dev), iommu); + if (ret) + return ret; + + dte_addr = virt_to_phys(rk_domain->dt); + rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr); + rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE); + rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); + + ret = rk_iommu_enable_paging(iommu); + if (ret) + return ret; + + spin_lock_irqsave(&rk_domain->iommus_lock, flags); + list_add_tail(&iommu->node, &rk_domain->iommus); + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); + + dev_info(dev, "Attached to iommu domain\n"); + + rk_iommu_disable_stall(iommu); + + return 0; +} + +static void rk_iommu_detach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct rk_iommu *iommu; + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + + /* Allow 'virtual devices' (eg drm) to detach from domain */ + iommu = rk_iommu_from_dev(dev); + if (!iommu) + return; + + spin_lock_irqsave(&rk_domain->iommus_lock, flags); + list_del_init(&iommu->node); + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); + + /* Ignore error while disabling, just keep going */ + rk_iommu_enable_stall(iommu); + rk_iommu_disable_paging(iommu); + rk_iommu_write(iommu, RK_MMU_INT_MASK, 0); + rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0); + rk_iommu_disable_stall(iommu); + + devm_free_irq(dev, iommu->irq, iommu); + + iommu->domain = NULL; + + dev_info(dev, "Detached from iommu domain\n"); +} + +static int rk_iommu_domain_init(struct iommu_domain *domain) +{ + struct rk_iommu_domain *rk_domain; + + rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); + if (!rk_domain) + return -ENOMEM; + + /* + * rk32xx iommus use a 2 level pagetable. + * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries. + * Allocate one 4 KiB page for each table. 
+ */ + rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32); + if (!rk_domain->dt) + goto err_dt; + + rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES); + + spin_lock_init(&rk_domain->iommus_lock); + spin_lock_init(&rk_domain->dt_lock); + INIT_LIST_HEAD(&rk_domain->iommus); + + domain->priv = rk_domain; + + return 0; +err_dt: + kfree(rk_domain); + return -ENOMEM; +} + +static void rk_iommu_domain_destroy(struct iommu_domain *domain) +{ + struct rk_iommu_domain *rk_domain = domain->priv; + int i; + + WARN_ON(!list_empty(&rk_domain->iommus)); + + for (i = 0; i < NUM_DT_ENTRIES; i++) { + u32 dte = rk_domain->dt[i]; + if (rk_dte_is_pt_valid(dte)) { + phys_addr_t pt_phys = rk_dte_pt_address(dte); + u32 *page_table = phys_to_virt(pt_phys); + free_page((unsigned long)page_table); + } + } + + free_page((unsigned long)rk_domain->dt); + kfree(domain->priv); + domain->priv = NULL; +} + +static bool rk_iommu_is_dev_iommu_master(struct device *dev) +{ + struct device_node *np = dev->of_node; + int ret; + + /* + * An iommu master has an iommus property containing a list of phandles + * to iommu nodes, each with an #iommu-cells property with value 0. + */ + ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells"); + return (ret > 0); +} + +static int rk_iommu_group_set_iommudata(struct iommu_group *group, + struct device *dev) +{ + struct device_node *np = dev->of_node; + struct platform_device *pd; + int ret; + struct of_phandle_args args; + + /* + * An iommu master has an iommus property containing a list of phandles + * to iommu nodes, each with an #iommu-cells property with value 0. + */ + ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0, + &args); + if (ret) { + dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n", + np->full_name, ret); + return ret; + } + if (args.args_count != 0) { + dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n", + args.np->full_name, args.args_count); + return -EINVAL; + } + + pd = of_find_device_by_node(args.np); + of_node_put(args.np); + if (!pd) { + dev_err(dev, "iommu %s not found\n", args.np->full_name); + return -EPROBE_DEFER; + } + + /* TODO(djkurtz): handle multiple slave iommus for a single master */ + iommu_group_set_iommudata(group, &pd->dev, NULL); + + return 0; +} + +static int rk_iommu_add_device(struct device *dev) +{ + struct iommu_group *group; + int ret; + + if (!rk_iommu_is_dev_iommu_master(dev)) + return -ENODEV; + + group = iommu_group_get(dev); + if (!group) { + group = iommu_group_alloc(); + if (IS_ERR(group)) { + dev_err(dev, "Failed to allocate IOMMU group\n"); + return PTR_ERR(group); + } + } + + ret = iommu_group_add_device(group, dev); + if (ret) + goto err_put_group; + + ret = rk_iommu_group_set_iommudata(group, dev); + if (ret) + goto err_remove_device; + + iommu_group_put(group); + + return 0; + +err_remove_device: + iommu_group_remove_device(dev); +err_put_group: + iommu_group_put(group); + return ret; +} + +static void rk_iommu_remove_device(struct device *dev) +{ + if (!rk_iommu_is_dev_iommu_master(dev)) + return; + + iommu_group_remove_device(dev); +} + +static const struct iommu_ops rk_iommu_ops = { + .domain_init = rk_iommu_domain_init, + .domain_destroy = rk_iommu_domain_destroy, + .attach_dev = rk_iommu_attach_device, + .detach_dev = rk_iommu_detach_device, + .map = rk_iommu_map, + .unmap = rk_iommu_unmap, + .add_device = rk_iommu_add_device, + .remove_device = rk_iommu_remove_device, + .iova_to_phys = rk_iommu_iova_to_phys, + .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, +}; + 
+static int rk_iommu_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rk_iommu *iommu; + struct resource *res; + + iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); + if (!iommu) + return -ENOMEM; + + platform_set_drvdata(pdev, iommu); + iommu->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iommu->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(iommu->base)) + return PTR_ERR(iommu->base); + + iommu->irq = platform_get_irq(pdev, 0); + if (iommu->irq < 0) { + dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq); + return -ENXIO; + } + + return 0; +} + +static int rk_iommu_remove(struct platform_device *pdev) +{ + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id rk_iommu_dt_ids[] = { + { .compatible = "rockchip,iommu" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids); +#endif + +static struct platform_driver rk_iommu_driver = { + .probe = rk_iommu_probe, + .remove = rk_iommu_remove, + .driver = { + .name = "rk_iommu", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(rk_iommu_dt_ids), + }, +}; + +static int __init rk_iommu_init(void) +{ + int ret; + + ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); + if (ret) + return ret; + + return platform_driver_register(&rk_iommu_driver); +} +static void __exit rk_iommu_exit(void) +{ + platform_driver_unregister(&rk_iommu_driver); +} + +subsys_initcall(rk_iommu_init); +module_exit(rk_iommu_exit); + +MODULE_DESCRIPTION("IOMMU API for Rockchip"); +MODULE_AUTHOR("Simon Xue and Daniel Kurtz "); +MODULE_ALIAS("platform:rockchip-iommu"); +MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From 656d7077d8ffd1c2492d4a0a354367ab2e545059 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Mon, 3 Nov 2014 10:53:28 +0800 Subject: dt-bindings: iommu: Add documentation for rockchip iommu Add binding documentation for Rockchip IOMMU. Signed-off-by: Daniel Kurtz Signed-off-by: Simon Xue Reviewed-by: Heiko Stuebner Signed-off-by: Joerg Roedel diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt new file mode 100644 index 0000000..9a55ac3 --- /dev/null +++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt @@ -0,0 +1,26 @@ +Rockchip IOMMU +============== + +A Rockchip DRM iommu translates io virtual addresses to physical addresses for +its master device. Each slave device is bound to a single master device, and +shares its clocks, power domain and irq. + +Required properties: +- compatible : Should be "rockchip,iommu" +- reg : Address space for the configuration registers +- interrupts : Interrupt specifier for the IOMMU instance +- interrupt-names : Interrupt name for the IOMMU instance +- #iommu-cells : Should be <0>. This indicates the iommu is a + "single-master" device, and needs no additional information + to associate with its master device. See: + Documentation/devicetree/bindings/iommu/iommu.txt + +Example: + + vopl_mmu: iommu@ff940300 { + compatible = "rockchip,iommu"; + reg = <0xff940300 0x100>; + interrupts = ; + interrupt-names = "vopl_mmu"; + #iommu-cells = <0>; + }; -- cgit v0.10.2 From 263bc3fd6a8e50dc3ef423f36a1ec9cd951a76b8 Mon Sep 17 00:00:00 2001 From: Kiran Padwal Date: Thu, 30 Oct 2014 11:55:11 +0530 Subject: iommu/msm: Use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. 
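For context, the wrapper being adopted is a one-line inline accessor from include/linux/device.h:

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}

Using it keeps drivers insulated from the struct device layout and makes platform-data accesses easy to grep for.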
Signed-off-by: Kiran Padwal Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c index 9574d21..b6d01f9 100644 --- a/drivers/iommu/msm_iommu_dev.c +++ b/drivers/iommu/msm_iommu_dev.c @@ -131,7 +131,7 @@ static int msm_iommu_probe(struct platform_device *pdev) struct clk *iommu_clk; struct clk *iommu_pclk; struct msm_iommu_drvdata *drvdata; - struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data; + struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev); void __iomem *regs_base; int ret, irq, par; @@ -263,7 +263,7 @@ static int msm_iommu_remove(struct platform_device *pdev) static int msm_iommu_ctx_probe(struct platform_device *pdev) { - struct msm_iommu_ctx_dev *c = pdev->dev.platform_data; + struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev); struct msm_iommu_drvdata *drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; int i, ret; -- cgit v0.10.2 From 99cb9aee770e482ac5ab1fe77213a38df9dd4e77 Mon Sep 17 00:00:00 2001 From: Kiran Padwal Date: Thu, 30 Oct 2014 11:59:47 +0530 Subject: iommu/omap: Use dev_get_platdata() Use the wrapper function for retrieving the platform data instead of accessing dev->platform_data directly. Signed-off-by: Kiran Padwal Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 2ba3219..839cd8b 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -170,7 +170,7 @@ static int iommu_enable(struct omap_iommu *obj) { int err; struct platform_device *pdev = to_platform_device(obj->dev); - struct iommu_platform_data *pdata = pdev->dev.platform_data; + struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); if (pdata && pdata->deassert_reset) { err = pdata->deassert_reset(pdev, pdata->reset_name); @@ -190,7 +190,7 @@ static int iommu_enable(struct omap_iommu *obj) static void iommu_disable(struct omap_iommu *obj) { struct platform_device *pdev = to_platform_device(obj->dev); - struct iommu_platform_data *pdata = pdev->dev.platform_data; + struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); omap2_iommu_disable(obj); @@ -1007,7 +1007,7 @@ static int omap_iommu_probe(struct platform_device *pdev) int irq; struct omap_iommu *obj; struct resource *res; - struct iommu_platform_data *pdata = pdev->dev.platform_data; + struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device_node *of = pdev->dev.of_node; obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); -- cgit v0.10.2 From e222d6a4259c134553acab493b5ab9fc05d70132 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Sat, 1 Nov 2014 11:45:32 +0800 Subject: iommu/ipmmu-vmsa: Return proper error if devm_request_irq fails Signed-off-by: Axel Lin Acked-by: Laurent Pinchart Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 7dab5cb..d25fa12 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1184,7 +1184,7 @@ static int ipmmu_probe(struct platform_device *pdev) dev_name(&pdev->dev), mmu); if (ret < 0) { dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); - return irq; + return ret; } ipmmu_device_reset(mmu); -- cgit v0.10.2 From 11175886daa6b31dfc713b6664215405de9ea1c7 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 3 Nov 2014 18:16:56 +0100 Subject: iommu/rockchip: Allow to compile with COMPILE_TEST Add optional CONFIG_COMPILE_TEST to dependency list. 
Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index d0a1261..f6f259a 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -154,7 +154,8 @@ config OMAP_IOMMU_DEBUG config ROCKCHIP_IOMMU bool "Rockchip IOMMU Support" - depends on ARCH_ROCKCHIP + depends on ARM + depends on ARCH_ROCKCHIP || COMPILE_TEST select IOMMU_API select ARM_DMA_USE_IOMMU help -- cgit v0.10.2 From 1a2262f90f493103496f3383741fb5d594c33738 Mon Sep 17 00:00:00 2001 From: "Li, Zhen-Hua" Date: Wed, 5 Nov 2014 15:30:19 +0800 Subject: x86/vt-d: Fix incorrect bit operations in setting values The functions context_set_address_root() and set_root_value() set the new address in the wrong way; this patch fixes that. According to the Intel VT-d spec (Feb 2011, Revision 1.3), Chapter 9.1 and 9.2, the ctp field in the root entry uses bits 12:63, and the asr field in the context entry uses bits 12:63. To set these fields, the following functions are used: static inline void context_set_address_root(struct context_entry *context, unsigned long value); and static inline void set_root_value(struct root_entry *root, unsigned long value) But they set these fields with an invalid method: in the current code, only a '|' operation is used. This will not set the asr to the expected value if it already holds an old value. For example: Before calling this function, context->lo = 0x3456789012111; value = 0x123456789abcef12; After we call context_set_address_root(context, value), the expected result is context->lo == 0x123456789abce111; But the actual result is: context->lo == 0x1237577f9bbde111; So we need to clear bits 12:63 before setting the new value; this fixes the problem. Signed-off-by: Li, Zhen-Hua Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a27d6cb..ba0fa2a 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -195,6 +195,7 @@ static inline void set_root_present(struct root_entry *root) } static inline void set_root_value(struct root_entry *root, unsigned long value) { + root->val &= ~VTD_PAGE_MASK; root->val |= value & VTD_PAGE_MASK; } @@ -247,6 +248,7 @@ static inline void context_set_translation_type(struct context_entry *context, static inline void context_set_address_root(struct context_entry *context, unsigned long value) { + context->lo &= ~VTD_PAGE_MASK; context->lo |= value & VTD_PAGE_MASK; } -- cgit v0.10.2 From 1c51099a427dc8bfee917b2d0e53ad3fafbb930d Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Mon, 10 Nov 2014 12:21:39 +0200 Subject: iommu/amd: Fix accounting of device_state This patch fixes a bug in the accounting of the device_state. In the current code, the device_state was put (decremented) too many times, which sometimes led to the driver getting stuck permanently in put_device_state_wait(). That happened because the device_state->count would go below zero, which is never supposed to happen. The root cause is that the device_state was decremented in put_pasid_state() and put_pasid_state_wait() but also in all the functions that call those functions. Therefore, the device_state was decremented twice in each of these code paths.
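The failure mode is easy to reproduce in miniature; here is a standalone C sketch (plain C11 atomics, deliberately not the driver code) of a reference count that is dropped once too often:

#include <assert.h>
#include <stdatomic.h>

struct obj {
	atomic_int count;
};

/* Release one reference; the 1 -> 0 transition must happen exactly once. */
static void put(struct obj *o)
{
	if (atomic_fetch_sub(&o->count, 1) == 1) {
		/* last reference: free the object / wake up waiters here */
	}
}

int main(void)
{
	struct obj o = { .count = 1 };

	put(&o);	/* correct: 1 -> 0, release side effects run */
	put(&o);	/* the extra put: 0 -> -1, the invariant is broken and
			 * a waiter keyed on reaching zero sleeps forever */
	assert(atomic_load(&o.count) == -1);
	return 0;
}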
The fix is to decouple the device_state accounting from the pasid_state accounting: remove the call to put_device_state() from put_pasid_state() and put_pasid_state_wait(). Signed-off-by: Oded Gabbay Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 90d734b..a2d87a6 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -279,10 +279,8 @@ static void free_pasid_state(struct pasid_state *pasid_state) static void put_pasid_state(struct pasid_state *pasid_state) { - if (atomic_dec_and_test(&pasid_state->count)) { - put_device_state(pasid_state->device_state); + if (atomic_dec_and_test(&pasid_state->count)) wake_up(&pasid_state->wq); - } } static void put_pasid_state_wait(struct pasid_state *pasid_state) @@ -291,9 +289,7 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state) prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE); - if (atomic_dec_and_test(&pasid_state->count)) - put_device_state(pasid_state->device_state); - else + if (!atomic_dec_and_test(&pasid_state->count)) schedule(); finish_wait(&pasid_state->wq, &wait); -- cgit v0.10.2 From a720b41c41f5a7e4c51558cf087882c57331581f Mon Sep 17 00:00:00 2001 From: Antonios Motakis Date: Mon, 13 Oct 2014 14:06:16 +0100 Subject: iommu/arm-smmu: change IOMMU_EXEC to IOMMU_NOEXEC Exposing the XN flag of the SMMU driver as IOMMU_NOEXEC instead of IOMMU_EXEC makes it enforceable, since for IOMMUs that don't support the XN flag pages will always be executable. Signed-off-by: Antonios Motakis Acked-by: Joerg Roedel Signed-off-by: Will Deacon diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 60558f7..566c176 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1281,7 +1281,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, unsigned long pfn, int prot, int stage) { pte_t *pte, *start; - pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN; + pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF; if (pmd_none(*pmd)) { /* Allocate a new set of tables */ @@ -1315,10 +1315,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, pteval |= ARM_SMMU_PTE_MEMATTR_NC; } + if (prot & IOMMU_NOEXEC) + pteval |= ARM_SMMU_PTE_XN; + /* If no access, create a faulting entry to avoid TLB fills */ - if (prot & IOMMU_EXEC) - pteval &= ~ARM_SMMU_PTE_XN; - else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) + if (!(prot & (IOMMU_READ | IOMMU_WRITE))) pteval &= ~ARM_SMMU_PTE_PAGE; pteval |= ARM_SMMU_PTE_SH_IS; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index e6a7c9f..f47383a 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -27,7 +27,7 @@ #define IOMMU_READ (1 << 0) #define IOMMU_WRITE (1 << 1) #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ -#define IOMMU_EXEC (1 << 3) +#define IOMMU_NOEXEC (1 << 3) struct iommu_ops; struct iommu_group; -- cgit v0.10.2 From c49866493b1ffb7c0a7963a1e3c0094e78760184 Mon Sep 17 00:00:00 2001 From: Antonios Motakis Date: Mon, 13 Oct 2014 14:06:17 +0100 Subject: iommu: add capability IOMMU_CAP_NOEXEC Some IOMMUs accept an IOMMU_NOEXEC protection flag in addition to IOMMU_READ and IOMMU_WRITE. Expose this as an IOMMU capability.
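A hypothetical consumer of the new capability might probe it before asking for non-executable mappings; a sketch (the helper name is invented; iommu_capable() and iommu_map() are the real API of this kernel generation):

#include <linux/iommu.h>

static int map_range_noexec(struct iommu_domain *domain, struct bus_type *bus,
			    unsigned long iova, phys_addr_t paddr, size_t size)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	/* Only request IOMMU_NOEXEC where the hardware can enforce it */
	if (iommu_capable(bus, IOMMU_CAP_NOEXEC))
		prot |= IOMMU_NOEXEC;

	return iommu_map(domain, iova, paddr, size, prot);
}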
Signed-off-by: Antonios Motakis Acked-by: Joerg Roedel Signed-off-by: Will Deacon diff --git a/include/linux/iommu.h b/include/linux/iommu.h index f47383a..e438b30 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -61,6 +61,7 @@ enum iommu_cap { IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA transactions */ IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ + IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ }; /* -- cgit v0.10.2 From 0029a8dd6c807e04b119ec9416510d30936f45e6 Mon Sep 17 00:00:00 2001 From: Antonios Motakis Date: Mon, 13 Oct 2014 14:06:18 +0100 Subject: iommu/arm-smmu: add IOMMU_CAP_NOEXEC to the ARM SMMU driver The ARM SMMU supports the IOMMU_NOEXEC protection flag. Add the corresponding IOMMU capability. Signed-off-by: Antonios Motakis Signed-off-by: Will Deacon diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 566c176..c8fc02f 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1569,6 +1569,8 @@ static bool arm_smmu_capable(enum iommu_cap cap) return true; case IOMMU_CAP_INTR_REMAP: return true; /* MSIs are just memory writes */ + case IOMMU_CAP_NOEXEC: + return true; default: return false; } -- cgit v0.10.2 From cfdeec22e459e3c5081c8fc6d956c8ee68943397 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 27 Oct 2014 11:24:48 +0000 Subject: iommu/amd: remove compiler warning due to IOMMU_CAP_NOEXEC Some versions of GCC get unduly upset when confronted with a switch that doesn't explicitly handle all cases of an enum, despite having an implicit default case following the actual switch statement: drivers/iommu/amd_iommu.c: In function 'amd_iommu_capable': >> drivers/iommu/amd_iommu.c:3409:2: warning: enumeration value 'IOMMU_CAP_NOEXEC' not handled in switch [-Wswitch] switch (cap) { This patch adds a case for IOMMU_CAP_NOEXEC to the AMD IOMMU driver to remove this warning. Cc: Joerg Roedel Signed-off-by: Will Deacon diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 505a9ad..3d78a8f 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3411,6 +3411,8 @@ static bool amd_iommu_capable(enum iommu_cap cap) return true; case IOMMU_CAP_INTR_REMAP: return (irq_remapping_enabled == 1); + case IOMMU_CAP_NOEXEC: + return false; } return false; -- cgit v0.10.2 From 0e7d37adbe45404a76d05d4ef11544f23cf639dd Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Fri, 7 Nov 2014 15:26:18 +0000 Subject: iommu/arm-smmu: Play nice on non-ARM/SMMU systems Currently the driver registers IOMMU bus operations for all busses even if no ARM SMMU is present on a system. Depending on the driver probing order this prevents the driver for the real IOMMU from registering itself as the bus-wide IOMMU. Signed-off-by: Thierry Reding Signed-off-by: Will Deacon diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index c8fc02f..2175814 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -2075,8 +2075,20 @@ static struct platform_driver arm_smmu_driver = { static int __init arm_smmu_init(void) { + struct device_node *np; int ret; + /* + * Play nice with systems that don't have an ARM SMMU by checking that + * an ARM SMMU exists in the system before proceeding with the driver + * and IOMMU bus operation registration.
+ */ + np = of_find_matching_node(NULL, arm_smmu_of_match); + if (!np) + return 0; + + of_node_put(np); + ret = platform_driver_register(&arm_smmu_driver); if (ret) return ret; -- cgit v0.10.2 From c752ce45b213de8532baaf987ba930638f77c439 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 25 Jun 2014 22:46:31 +0100 Subject: iommu/arm-smmu: add support for DOMAIN_ATTR_NESTING attribute When domains are set with the DOMAIN_ATTR_NESTING flag, we must ensure that we allocate them to stage-2 context banks if the hardware permits it. This patch adds support for the attribute to the ARM SMMU driver, with the actual stage being determined depending on the features supported by the hardware. Signed-off-by: Will Deacon diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 2175814..7a80f71 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -404,9 +404,16 @@ struct arm_smmu_cfg { #define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) +enum arm_smmu_domain_stage { + ARM_SMMU_DOMAIN_S1 = 0, + ARM_SMMU_DOMAIN_S2, + ARM_SMMU_DOMAIN_NESTED, +}; + struct arm_smmu_domain { struct arm_smmu_device *smmu; struct arm_smmu_cfg cfg; + enum arm_smmu_domain_stage stage; spinlock_t lock; }; @@ -906,19 +913,46 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, if (smmu_domain->smmu) goto out_unlock; - if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { + /* + * Mapping the requested stage onto what we support is surprisingly + * complicated, mainly because the spec allows S1+S2 SMMUs without + * support for nested translation. That means we end up with the + * following table: + * + * Requested Supported Actual + * S1 N S1 + * S1 S1+S2 S1 + * S1 S2 S2 + * S1 S1 S1 + * N N N + * N S1+S2 S2 + * N S2 S2 + * N S1 S1 + * + * Note that you can't actually request stage-2 mappings. + */ + if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) + smmu_domain->stage = ARM_SMMU_DOMAIN_S2; + if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) + smmu_domain->stage = ARM_SMMU_DOMAIN_S1; + + switch (smmu_domain->stage) { + case ARM_SMMU_DOMAIN_S1: + cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; + start = smmu->num_s2_context_banks; + break; + case ARM_SMMU_DOMAIN_NESTED: /* * We will likely want to change this if/when KVM gets * involved. 
*/ - cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; - start = smmu->num_s2_context_banks; - } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) { - cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; - start = smmu->num_s2_context_banks; - } else { + case ARM_SMMU_DOMAIN_S2: cfg->cbar = CBAR_TYPE_S2_TRANS; start = 0; + break; + default: + ret = -EINVAL; + goto out_unlock; } ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, @@ -1647,20 +1681,56 @@ static void arm_smmu_remove_device(struct device *dev) iommu_group_remove_device(dev); } +static int arm_smmu_domain_get_attr(struct iommu_domain *domain, + enum iommu_attr attr, void *data) +{ + struct arm_smmu_domain *smmu_domain = domain->priv; + + switch (attr) { + case DOMAIN_ATTR_NESTING: + *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); + return 0; + default: + return -ENODEV; + } +} + +static int arm_smmu_domain_set_attr(struct iommu_domain *domain, + enum iommu_attr attr, void *data) +{ + struct arm_smmu_domain *smmu_domain = domain->priv; + + switch (attr) { + case DOMAIN_ATTR_NESTING: + if (smmu_domain->smmu) + return -EPERM; + if (*(int *)data) + smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; + else + smmu_domain->stage = ARM_SMMU_DOMAIN_S1; + + return 0; + default: + return -ENODEV; + } +} + static const struct iommu_ops arm_smmu_ops = { - .capable = arm_smmu_capable, - .domain_init = arm_smmu_domain_init, - .domain_destroy = arm_smmu_domain_destroy, - .attach_dev = arm_smmu_attach_dev, - .detach_dev = arm_smmu_detach_dev, - .map = arm_smmu_map, - .unmap = arm_smmu_unmap, - .iova_to_phys = arm_smmu_iova_to_phys, - .add_device = arm_smmu_add_device, - .remove_device = arm_smmu_remove_device, - .pgsize_bitmap = (SECTION_SIZE | - ARM_SMMU_PTE_CONT_SIZE | - PAGE_SIZE), + .capable = arm_smmu_capable, + .domain_init = arm_smmu_domain_init, + .domain_destroy = arm_smmu_domain_destroy, + .attach_dev = arm_smmu_attach_dev, + .detach_dev = arm_smmu_detach_dev, + .map = arm_smmu_map, + .unmap = arm_smmu_unmap, + .iova_to_phys = arm_smmu_iova_to_phys, + .add_device = arm_smmu_add_device, + .remove_device = arm_smmu_remove_device, + .domain_get_attr = arm_smmu_domain_get_attr, + .domain_set_attr = arm_smmu_domain_set_attr, + .pgsize_bitmap = (SECTION_SIZE | + ARM_SMMU_PTE_CONT_SIZE | + PAGE_SIZE), }; static void arm_smmu_device_reset(struct arm_smmu_device *smmu) -- cgit v0.10.2 From c2a0b538d2c778aef7bf2fbe7973229192c9a392 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Sun, 9 Nov 2014 22:47:56 +0800 Subject: iommu/vt-d: Introduce helper function dmar_walk_resources() Introduce helper function dmar_walk_resources to walk resource entries in DMAR table and ACPI buffer object returned by ACPI _DSM method for IOMMU hot-plug. 
Signed-off-by: Jiang Liu Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index c5c61ca..586dd2a 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -44,6 +44,14 @@ #include "irq_remapping.h" +typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *); +struct dmar_res_callback { + dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED]; + void *arg[ACPI_DMAR_TYPE_RESERVED]; + bool ignore_unhandled; + bool print_entry; +}; + /* * Assumptions: * 1) The hotplug framework guarentees that DMAR unit will be hot-added @@ -350,7 +358,7 @@ static struct notifier_block dmar_pci_bus_nb = { * present in the platform */ static int __init -dmar_parse_one_drhd(struct acpi_dmar_header *header) +dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_hardware_unit *drhd; struct dmar_drhd_unit *dmaru; @@ -381,6 +389,10 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) return ret; } dmar_register_drhd_unit(dmaru); + + if (arg) + (*(int *)arg)++; + return 0; } @@ -393,7 +405,8 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru) kfree(dmaru); } -static int __init dmar_parse_one_andd(struct acpi_dmar_header *header) +static int __init dmar_parse_one_andd(struct acpi_dmar_header *header, + void *arg) { struct acpi_dmar_andd *andd = (void *)header; @@ -415,7 +428,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header) #ifdef CONFIG_ACPI_NUMA static int __init -dmar_parse_one_rhsa(struct acpi_dmar_header *header) +dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_rhsa *rhsa; struct dmar_drhd_unit *drhd; @@ -442,6 +455,8 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header) return 0; } +#else +#define dmar_parse_one_rhsa dmar_res_noop #endif static void __init @@ -503,6 +518,52 @@ static int __init dmar_table_detect(void) return (ACPI_SUCCESS(status) ? 
1 : 0); } +static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, + size_t len, struct dmar_res_callback *cb) +{ + int ret = 0; + struct acpi_dmar_header *iter, *next; + struct acpi_dmar_header *end = ((void *)start) + len; + + for (iter = start; iter < end && ret == 0; iter = next) { + next = (void *)iter + iter->length; + if (iter->length == 0) { + /* Avoid looping forever on bad ACPI tables */ + pr_debug(FW_BUG "Invalid 0-length structure\n"); + break; + } else if (next > end) { + /* Avoid passing table end */ + pr_warn(FW_BUG "record passes table end\n"); + ret = -EINVAL; + break; + } + + if (cb->print_entry) + dmar_table_print_dmar_entry(iter); + + if (iter->type >= ACPI_DMAR_TYPE_RESERVED) { + /* continue for forward compatibility */ + pr_debug("Unknown DMAR structure type %d\n", + iter->type); + } else if (cb->cb[iter->type]) { + ret = cb->cb[iter->type](iter, cb->arg[iter->type]); + } else if (!cb->ignore_unhandled) { + pr_warn("No handler for DMAR structure type %d\n", + iter->type); + ret = -EINVAL; + } + } + + return ret; +} + +static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar, + struct dmar_res_callback *cb) +{ + return dmar_walk_remapping_entries((void *)(dmar + 1), + dmar->header.length - sizeof(*dmar), cb); +} + /** * parse_dmar_table - parses the DMA reporting table */ @@ -510,9 +571,18 @@ static int __init parse_dmar_table(void) { struct acpi_table_dmar *dmar; - struct acpi_dmar_header *entry_header; int ret = 0; int drhd_count = 0; + struct dmar_res_callback cb = { + .print_entry = true, + .ignore_unhandled = true, + .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count, + .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd, + .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr, + .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr, + .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa, + .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd, + }; /* * Do it again, earlier dmar_tbl mapping could be mapped with @@ -536,51 +606,10 @@ parse_dmar_table(void) } pr_info("Host address width %d\n", dmar->width + 1); - - entry_header = (struct acpi_dmar_header *)(dmar + 1); - while (((unsigned long)entry_header) < - (((unsigned long)dmar) + dmar_tbl->length)) { - /* Avoid looping forever on bad ACPI tables */ - if (entry_header->length == 0) { - pr_warn("Invalid 0-length structure\n"); - ret = -EINVAL; - break; - } - - dmar_table_print_dmar_entry(entry_header); - - switch (entry_header->type) { - case ACPI_DMAR_TYPE_HARDWARE_UNIT: - drhd_count++; - ret = dmar_parse_one_drhd(entry_header); - break; - case ACPI_DMAR_TYPE_RESERVED_MEMORY: - ret = dmar_parse_one_rmrr(entry_header); - break; - case ACPI_DMAR_TYPE_ROOT_ATS: - ret = dmar_parse_one_atsr(entry_header); - break; - case ACPI_DMAR_TYPE_HARDWARE_AFFINITY: -#ifdef CONFIG_ACPI_NUMA - ret = dmar_parse_one_rhsa(entry_header); -#endif - break; - case ACPI_DMAR_TYPE_NAMESPACE: - ret = dmar_parse_one_andd(entry_header); - break; - default: - pr_warn("Unknown DMAR structure type %d\n", - entry_header->type); - ret = 0; /* for forward compatibility */ - break; - } - if (ret) - break; - - entry_header = ((void *)entry_header + entry_header->length); - } - if (drhd_count == 0) + ret = dmar_walk_dmar_table(dmar, &cb); + if (ret == 0 && drhd_count == 0) pr_warn(FW_BUG "No DRHD structure found in DMAR table\n"); + return ret; } @@ -778,76 +807,60 @@ static void warn_invalid_dmar(u64 addr, const char *message) dmi_get_system_info(DMI_PRODUCT_VERSION)); } -static int __init 
check_zero_address(void) +static int __ref +dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg) { - struct acpi_table_dmar *dmar; - struct acpi_dmar_header *entry_header; struct acpi_dmar_hardware_unit *drhd; + void __iomem *addr; + u64 cap, ecap; - dmar = (struct acpi_table_dmar *)dmar_tbl; - entry_header = (struct acpi_dmar_header *)(dmar + 1); - - while (((unsigned long)entry_header) < - (((unsigned long)dmar) + dmar_tbl->length)) { - /* Avoid looping forever on bad ACPI tables */ - if (entry_header->length == 0) { - pr_warn("Invalid 0-length structure\n"); - return 0; - } - - if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) { - void __iomem *addr; - u64 cap, ecap; - - drhd = (void *)entry_header; - if (!drhd->address) { - warn_invalid_dmar(0, ""); - goto failed; - } + drhd = (void *)entry; + if (!drhd->address) { + warn_invalid_dmar(0, ""); + return -EINVAL; + } - addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); - if (!addr ) { - printk("IOMMU: can't validate: %llx\n", drhd->address); - goto failed; - } - cap = dmar_readq(addr + DMAR_CAP_REG); - ecap = dmar_readq(addr + DMAR_ECAP_REG); - early_iounmap(addr, VTD_PAGE_SIZE); - if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { - warn_invalid_dmar(drhd->address, - " returns all ones"); - goto failed; - } - } + addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); + if (!addr) { + pr_warn("IOMMU: can't validate: %llx\n", drhd->address); + return -EINVAL; + } + cap = dmar_readq(addr + DMAR_CAP_REG); + ecap = dmar_readq(addr + DMAR_ECAP_REG); + early_iounmap(addr, VTD_PAGE_SIZE); - entry_header = ((void *)entry_header + entry_header->length); + if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { + warn_invalid_dmar(drhd->address, " returns all ones"); + return -EINVAL; } - return 1; -failed: return 0; } int __init detect_intel_iommu(void) { int ret; + struct dmar_res_callback validate_drhd_cb = { + .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd, + .ignore_unhandled = true, + }; down_write(&dmar_global_lock); ret = dmar_table_detect(); if (ret) - ret = check_zero_address(); - { - if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { - iommu_detected = 1; - /* Make sure ACS will be enabled */ - pci_request_acs(); - } + ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, + &validate_drhd_cb); + if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { + iommu_detected = 1; + /* Make sure ACS will be enabled */ + pci_request_acs(); + } #ifdef CONFIG_X86 - if (ret) - x86_init.iommu.iommu_init = intel_iommu_init; + if (ret) + x86_init.iommu.iommu_init = intel_iommu_init; #endif - } + early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size); dmar_tbl = NULL; up_write(&dmar_global_lock); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index ba0fa2a..b9cc9c2 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3684,7 +3684,7 @@ static inline void init_iommu_pm_ops(void) {} #endif /* CONFIG_PM */ -int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) +int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_reserved_memory *rmrr; struct dmar_rmrr_unit *rmrru; @@ -3710,7 +3710,7 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) return 0; } -int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) +int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg) { struct acpi_dmar_atsr *atsr; struct dmar_atsr_unit *atsru; diff --git a/include/linux/dmar.h 
b/include/linux/dmar.h index 593fff9..495df5e 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -121,22 +121,21 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, extern int detect_intel_iommu(void); extern int enable_drhd_fault_handling(void); +static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg) +{ + return 0; +} + #ifdef CONFIG_INTEL_IOMMU extern int iommu_detected, no_iommu; extern int intel_iommu_init(void); -extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); -extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); +extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg); +extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg); extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); #else /* !CONFIG_INTEL_IOMMU: */ static inline int intel_iommu_init(void) { return -ENODEV; } -static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) -{ - return 0; -} -static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) -{ - return 0; -} +#define dmar_parse_one_rmrr dmar_res_noop +#define dmar_parse_one_atsr dmar_res_noop static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) { return 0; -- cgit v0.10.2 From 78d8e7046111425bb688cddc4303d79cb0f0d281 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Sun, 9 Nov 2014 22:47:57 +0800 Subject: iommu/vt-d: Dynamically allocate and free seq_id for DMAR units Introduce functions to support dynamic IOMMU seq_id allocating and releasing, which will be used to support DMAR hotplug. Also rename IOMMU_UNITS_SUPPORTED as DMAR_UNITS_SUPPORTED. Signed-off-by: Jiang Liu Reviewed-by: Yijing Wang Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 586dd2a..78aa1b2 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -70,6 +70,7 @@ LIST_HEAD(dmar_drhd_units); struct acpi_table_header * __initdata dmar_tbl; static acpi_size dmar_tbl_size; static int dmar_dev_scope_status = 1; +static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)]; static int alloc_iommu(struct dmar_drhd_unit *drhd); static void free_iommu(struct intel_iommu *iommu); @@ -944,11 +945,32 @@ out: return err; } +static int dmar_alloc_seq_id(struct intel_iommu *iommu) +{ + iommu->seq_id = find_first_zero_bit(dmar_seq_ids, + DMAR_UNITS_SUPPORTED); + if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) { + iommu->seq_id = -1; + } else { + set_bit(iommu->seq_id, dmar_seq_ids); + sprintf(iommu->name, "dmar%d", iommu->seq_id); + } + + return iommu->seq_id; +} + +static void dmar_free_seq_id(struct intel_iommu *iommu) +{ + if (iommu->seq_id >= 0) { + clear_bit(iommu->seq_id, dmar_seq_ids); + iommu->seq_id = -1; + } +} + static int alloc_iommu(struct dmar_drhd_unit *drhd) { struct intel_iommu *iommu; u32 ver, sts; - static int iommu_allocated = 0; int agaw = 0; int msagaw = 0; int err; @@ -962,13 +984,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) if (!iommu) return -ENOMEM; - iommu->seq_id = iommu_allocated++; - sprintf (iommu->name, "dmar%d", iommu->seq_id); + if (dmar_alloc_seq_id(iommu) < 0) { + pr_err("IOMMU: failed to allocate seq_id\n"); + err = -ENOSPC; + goto error; + } err = map_iommu(iommu, drhd->reg_base_addr); if (err) { pr_err("IOMMU: failed to map %s\n", iommu->name); - goto error; + goto error_free_seq_id; } err = -EINVAL; @@ -1018,9 +1043,11 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) return 0; - err_unmap: +err_unmap: unmap_iommu(iommu); - 
error:
+error_free_seq_id:
+	dmar_free_seq_id(iommu);
+error:
 	kfree(iommu);
 	return err;
 }

@@ -1044,6 +1071,7 @@ static void free_iommu(struct intel_iommu *iommu)
 	if (iommu->reg)
 		unmap_iommu(iommu);

+	dmar_free_seq_id(iommu);
 	kfree(iommu);
 }

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b9cc9c2..2779354 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -330,17 +330,10 @@ static int hw_pass_through = 1;
 /* si_domain contains mulitple devices */
 #define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

-/* define the limit of IOMMUs supported in each domain */
-#ifdef CONFIG_X86
-# define IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
-#else
-# define IOMMU_UNITS_SUPPORTED	64
-#endif
-
 struct dmar_domain {
 	int	id;			/* domain id */
 	int	nid;			/* node id */
-	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
+	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
 					/* bitmap of iommus this domain uses*/

 	struct list_head devices;	/* all devices' list */
@@ -2730,12 +2723,12 @@ static int __init init_dmars(void)
 		 * threaded kernel __init code path all other access are read
 		 * only
 		 */
-		if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
+		if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
 			g_num_of_iommus++;
 			continue;
 		}
 		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
-			    IOMMU_UNITS_SUPPORTED);
+			    DMAR_UNITS_SUPPORTED);
 	}

 	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),

diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 495df5e..725204f 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -30,6 +30,12 @@

 struct acpi_dmar_header;

+#ifdef CONFIG_X86
+# define DMAR_UNITS_SUPPORTED	MAX_IO_APICS
+#else
+# define DMAR_UNITS_SUPPORTED	64
+#endif
+
 /* DMAR Flags */
 #define DMAR_INTR_REMAP		0x1
 #define DMAR_X2APIC_OPT_OUT	0x2

-- cgit v0.10.2

From 6b1972493a84f8fe13ff9d202745590f6c53d670 Mon Sep 17 00:00:00 2001
From: Jiang Liu
Date: Sun, 9 Nov 2014 22:47:58 +0800
Subject: iommu/vt-d: Implement DMAR unit hotplug framework

On Intel platforms, an IO Hub (PCI/PCIe host bridge) may contain DMAR units, so we need to support DMAR hotplug when supporting PCI host bridge hotplug on Intel platforms.

According to Section 8.8 "Remapping Hardware Unit Hot Plug" in "Intel Virtualization Technology for Directed IO Architecture Specification Rev 2.2", ACPI BIOS should implement an ACPI _DSM method under the ACPI object for the PCI host bridge to support DMAR hotplug.

This patch introduces interfaces to parse the ACPI _DSM method for DMAR unit hotplug. It also implements state machines for DMAR unit hot-addition and hot-removal. The PCI host bridge hotplug driver should call dmar_device_add() before scanning the PCI devices connected under the bridge for hot-addition, and dmar_device_remove() after destroying all PCI devices for hot-removal.
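For orientation, here is the intended consumer-side call sequence as a hedged sketch (a kernel-context fragment, not standalone code, with error handling trimmed; the real call sites appear in the pci_root patch at the end of this series):

	static int host_bridge_hot_add_sketch(acpi_handle handle)
	{
		if (dmar_device_add(handle))	/* bring up DMAR units first ... */
			return -ENXIO;
		/* ... then scan and bind the PCI devices below the bridge */
		return 0;
	}

	static void host_bridge_hot_remove_sketch(acpi_handle handle)
	{
		/* destroy all PCI devices below the bridge first ... */
		dmar_device_remove(handle);	/* ... then tear down the DMAR units */
	}

The ordering matters: DMA remapping must be operational before any hot-added device can issue DMA, and must outlive every device it translates for.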
Signed-off-by: Jiang Liu Reviewed-by: Yijing Wang Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 78aa1b2..0bd536d 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -75,7 +75,7 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)]; static int alloc_iommu(struct dmar_drhd_unit *drhd); static void free_iommu(struct intel_iommu *iommu); -static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) +static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) { /* * add INCLUDE_ALL at the tail, so scan the list will find it at @@ -353,24 +353,45 @@ static struct notifier_block dmar_pci_bus_nb = { .priority = INT_MIN, }; +static struct dmar_drhd_unit * +dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd) +{ + struct dmar_drhd_unit *dmaru; + + list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list) + if (dmaru->segment == drhd->segment && + dmaru->reg_base_addr == drhd->address) + return dmaru; + + return NULL; +} + /** * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition * structure which uniquely represent one DMA remapping hardware unit * present in the platform */ -static int __init -dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg) +static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_hardware_unit *drhd; struct dmar_drhd_unit *dmaru; int ret = 0; drhd = (struct acpi_dmar_hardware_unit *)header; - dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); + dmaru = dmar_find_dmaru(drhd); + if (dmaru) + goto out; + + dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL); if (!dmaru) return -ENOMEM; - dmaru->hdr = header; + /* + * If header is allocated from slab by ACPI _DSM method, we need to + * copy the content because the memory buffer will be freed on return. 
+ */ + dmaru->hdr = (void *)(dmaru + 1); + memcpy(dmaru->hdr, header, header->length); dmaru->reg_base_addr = drhd->address; dmaru->segment = drhd->segment; dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ @@ -391,6 +412,7 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg) } dmar_register_drhd_unit(dmaru); +out: if (arg) (*(int *)arg)++; @@ -428,8 +450,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header, } #ifdef CONFIG_ACPI_NUMA -static int __init -dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg) +static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_rhsa *rhsa; struct dmar_drhd_unit *drhd; @@ -821,14 +842,22 @@ dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg) return -EINVAL; } - addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); + if (arg) + addr = ioremap(drhd->address, VTD_PAGE_SIZE); + else + addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); if (!addr) { pr_warn("IOMMU: can't validate: %llx\n", drhd->address); return -EINVAL; } + cap = dmar_readq(addr + DMAR_CAP_REG); ecap = dmar_readq(addr + DMAR_ECAP_REG); - early_iounmap(addr, VTD_PAGE_SIZE); + + if (arg) + iounmap(addr); + else + early_iounmap(addr, VTD_PAGE_SIZE); if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { warn_invalid_dmar(drhd->address, " returns all ones"); @@ -1702,12 +1731,17 @@ int __init dmar_ir_support(void) return dmar->flags & 0x1; } +/* Check whether DMAR units are in use */ +static inline bool dmar_in_use(void) +{ + return irq_remapping_enabled || intel_iommu_enabled; +} + static int __init dmar_free_unused_resources(void) { struct dmar_drhd_unit *dmaru, *dmaru_n; - /* DMAR units are in use */ - if (irq_remapping_enabled || intel_iommu_enabled) + if (dmar_in_use()) return 0; if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units)) @@ -1725,3 +1759,215 @@ static int __init dmar_free_unused_resources(void) late_initcall(dmar_free_unused_resources); IOMMU_INIT_POST(detect_intel_iommu); + +/* + * DMAR Hotplug Support + * For more details, please refer to Intel(R) Virtualization Technology + * for Directed-IO Architecture Specifiction, Rev 2.2, Section 8.8 + * "Remapping Hardware Unit Hot Plug". + */ +static u8 dmar_hp_uuid[] = { + /* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C, + /* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF +}; + +/* + * Currently there's only one revision and BIOS will not check the revision id, + * so use 0 for safety. 
+ */ +#define DMAR_DSM_REV_ID 0 +#define DMAR_DSM_FUNC_DRHD 1 +#define DMAR_DSM_FUNC_ATSR 2 +#define DMAR_DSM_FUNC_RHSA 3 + +static inline bool dmar_detect_dsm(acpi_handle handle, int func) +{ + return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func); +} + +static int dmar_walk_dsm_resource(acpi_handle handle, int func, + dmar_res_handler_t handler, void *arg) +{ + int ret = -ENODEV; + union acpi_object *obj; + struct acpi_dmar_header *start; + struct dmar_res_callback callback; + static int res_type[] = { + [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT, + [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS, + [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY, + }; + + if (!dmar_detect_dsm(handle, func)) + return 0; + + obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, + func, NULL, ACPI_TYPE_BUFFER); + if (!obj) + return -ENODEV; + + memset(&callback, 0, sizeof(callback)); + callback.cb[res_type[func]] = handler; + callback.arg[res_type[func]] = arg; + start = (struct acpi_dmar_header *)obj->buffer.pointer; + ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback); + + ACPI_FREE(obj); + + return ret; +} + +static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg) +{ + int ret; + struct dmar_drhd_unit *dmaru; + + dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header); + if (!dmaru) + return -ENODEV; + + ret = dmar_ir_hotplug(dmaru, true); + if (ret == 0) + ret = dmar_iommu_hotplug(dmaru, true); + + return ret; +} + +static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg) +{ + int i, ret; + struct device *dev; + struct dmar_drhd_unit *dmaru; + + dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header); + if (!dmaru) + return 0; + + /* + * All PCI devices managed by this unit should have been destroyed. 
+ */ + if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) + for_each_active_dev_scope(dmaru->devices, + dmaru->devices_cnt, i, dev) + return -EBUSY; + + ret = dmar_ir_hotplug(dmaru, false); + if (ret == 0) + ret = dmar_iommu_hotplug(dmaru, false); + + return ret; +} + +static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg) +{ + struct dmar_drhd_unit *dmaru; + + dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header); + if (dmaru) { + list_del_rcu(&dmaru->list); + synchronize_rcu(); + dmar_free_drhd(dmaru); + } + + return 0; +} + +static int dmar_hotplug_insert(acpi_handle handle) +{ + int ret; + int drhd_count = 0; + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_validate_one_drhd, (void *)1); + if (ret) + goto out; + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_parse_one_drhd, (void *)&drhd_count); + if (ret == 0 && drhd_count == 0) { + pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n"); + goto out; + } else if (ret) { + goto release_drhd; + } + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA, + &dmar_parse_one_rhsa, NULL); + if (ret) + goto release_drhd; + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, + &dmar_parse_one_atsr, NULL); + if (ret) + goto release_atsr; + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_hp_add_drhd, NULL); + if (!ret) + return 0; + + dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_hp_remove_drhd, NULL); +release_atsr: + dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, + &dmar_release_one_atsr, NULL); +release_drhd: + dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_hp_release_drhd, NULL); +out: + return ret; +} + +static int dmar_hotplug_remove(acpi_handle handle) +{ + int ret; + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, + &dmar_check_one_atsr, NULL); + if (ret) + return ret; + + ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_hp_remove_drhd, NULL); + if (ret == 0) { + WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, + &dmar_release_one_atsr, NULL)); + WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_hp_release_drhd, NULL)); + } else { + dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, + &dmar_hp_add_drhd, NULL); + } + + return ret; +} + +static int dmar_device_hotplug(acpi_handle handle, bool insert) +{ + int ret; + + if (!dmar_in_use()) + return 0; + + if (!dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) + return 0; + + down_write(&dmar_global_lock); + if (insert) + ret = dmar_hotplug_insert(handle); + else + ret = dmar_hotplug_remove(handle); + up_write(&dmar_global_lock); + + return ret; +} + +int dmar_device_add(acpi_handle handle) +{ + return dmar_device_hotplug(handle, true); +} + +int dmar_device_remove(acpi_handle handle) +{ + return dmar_device_hotplug(handle, false); +} diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2779354..7c49ab5 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3703,17 +3703,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) return 0; } -int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg) +static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr) +{ + struct dmar_atsr_unit *atsru; + struct acpi_dmar_atsr *tmp; + + list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) { + tmp = (struct acpi_dmar_atsr *)atsru->hdr; + if (atsr->segment != tmp->segment) + continue; + if 
(atsr->header.length != tmp->header.length) + continue; + if (memcmp(atsr, tmp, atsr->header.length) == 0) + return atsru; + } + + return NULL; +} + +int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg) { struct acpi_dmar_atsr *atsr; struct dmar_atsr_unit *atsru; + if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled) + return 0; + atsr = container_of(hdr, struct acpi_dmar_atsr, header); - atsru = kzalloc(sizeof(*atsru), GFP_KERNEL); + atsru = dmar_find_atsr(atsr); + if (atsru) + return 0; + + atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL); if (!atsru) return -ENOMEM; - atsru->hdr = hdr; + /* + * If memory is allocated from slab by ACPI _DSM method, we need to + * copy the memory content because the memory buffer will be freed + * on return. + */ + atsru->hdr = (void *)(atsru + 1); + memcpy(atsru->hdr, hdr, hdr->length); atsru->include_all = atsr->flags & 0x1; if (!atsru->include_all) { atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), @@ -3736,6 +3767,47 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru) kfree(atsru); } +int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg) +{ + struct acpi_dmar_atsr *atsr; + struct dmar_atsr_unit *atsru; + + atsr = container_of(hdr, struct acpi_dmar_atsr, header); + atsru = dmar_find_atsr(atsr); + if (atsru) { + list_del_rcu(&atsru->list); + synchronize_rcu(); + intel_iommu_free_atsr(atsru); + } + + return 0; +} + +int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg) +{ + int i; + struct device *dev; + struct acpi_dmar_atsr *atsr; + struct dmar_atsr_unit *atsru; + + atsr = container_of(hdr, struct acpi_dmar_atsr, header); + atsru = dmar_find_atsr(atsr); + if (!atsru) + return 0; + + if (!atsru->include_all && atsru->devices && atsru->devices_cnt) + for_each_active_dev_scope(atsru->devices, atsru->devices_cnt, + i, dev) + return -EBUSY; + + return 0; +} + +int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert) +{ + return intel_iommu_enabled ? -ENOSYS : 0; +} + static void intel_iommu_free_dmars(void) { struct dmar_rmrr_unit *rmrru, *rmrr_n; diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 7c80661..1cbdb50 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -1171,3 +1171,8 @@ struct irq_remap_ops intel_irq_remap_ops = { .msi_setup_irq = intel_msi_setup_irq, .alloc_hpet_msi = intel_alloc_hpet_msi, }; + +int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) +{ + return irq_remapping_enabled ? 
-ENOSYS : 0;
+}

diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 725204f..3062495 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -126,6 +126,8 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
 /* Intel IOMMU detection */
 extern int detect_intel_iommu(void);
 extern int enable_drhd_fault_handling(void);
+extern int dmar_device_add(acpi_handle handle);
+extern int dmar_device_remove(acpi_handle handle);

 static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
 {
@@ -137,17 +139,48 @@ extern int iommu_detected, no_iommu;
 extern int intel_iommu_init(void);
 extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
 extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
+extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
 extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
 #else /* !CONFIG_INTEL_IOMMU: */
 static inline int intel_iommu_init(void) { return -ENODEV; }
+
 #define	dmar_parse_one_rmrr	dmar_res_noop
 #define	dmar_parse_one_atsr	dmar_res_noop
+#define	dmar_check_one_atsr	dmar_res_noop
+#define	dmar_release_one_atsr	dmar_res_noop
+
 static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
 {
 	return 0;
 }
+
+static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+	return 0;
+}
 #endif /* CONFIG_INTEL_IOMMU */

+#ifdef CONFIG_IRQ_REMAP
+extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
+#else  /* CONFIG_IRQ_REMAP */
+static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{ return 0; }
+#endif /* CONFIG_IRQ_REMAP */
+
+#else /* CONFIG_DMAR_TABLE */
+
+static inline int dmar_device_add(void *handle)
+{
+	return 0;
+}
+
+static inline int dmar_device_remove(void *handle)
+{
+	return 0;
+}
+
 #endif /* CONFIG_DMAR_TABLE */

 struct irte {

-- cgit v0.10.2

From d35165a955f095095cdb8512cb7cd8f63101649a Mon Sep 17 00:00:00 2001
From: Jiang Liu
Date: Sun, 9 Nov 2014 22:47:59 +0800
Subject: iommu/vt-d: Search for ACPI _DSM method for DMAR hotplug

According to the Intel VT-d specification, the _DSM method to support DMAR hotplug should exist directly under the corresponding ACPI object representing the PCI host bridge. But some BIOSes don't conform to this, so search for the _DSM method in the subtree starting from the ACPI object representing the PCI host bridge.
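In outline, the fix adds a fallback search; a condensed sketch of the logic the diff below implements (kernel context, not the literal patch):

	static acpi_handle find_dmar_dsm_sketch(acpi_handle bridge)
	{
		acpi_handle found = NULL;

		if (dmar_detect_dsm(bridge, DMAR_DSM_FUNC_DRHD))
			return bridge;
		/* dmar_get_dsm_handle() returns AE_CTRL_TERMINATE on a match,
		 * which ends the namespace walk early */
		acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge, ACPI_UINT32_MAX,
				    dmar_get_dsm_handle, NULL, NULL, &found);
		return found;
	}

The spec-conformant location is still tried first; the depth-first subtree walk only runs as a workaround for non-conformant firmware.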
Signed-off-by: Jiang Liu Reviewed-by: Yijing Wang Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 0bd536d..9847613 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1942,21 +1942,48 @@ static int dmar_hotplug_remove(acpi_handle handle) return ret; } +static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl, + void *context, void **retval) +{ + acpi_handle *phdl = retval; + + if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) { + *phdl = handle; + return AE_CTRL_TERMINATE; + } + + return AE_OK; +} + static int dmar_device_hotplug(acpi_handle handle, bool insert) { int ret; + acpi_handle tmp = NULL; + acpi_status status; if (!dmar_in_use()) return 0; - if (!dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) + if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) { + tmp = handle; + } else { + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, + ACPI_UINT32_MAX, + dmar_get_dsm_handle, + NULL, NULL, &tmp); + if (ACPI_FAILURE(status)) { + pr_warn("Failed to locate _DSM method.\n"); + return -ENXIO; + } + } + if (tmp == NULL) return 0; down_write(&dmar_global_lock); if (insert) - ret = dmar_hotplug_insert(handle); + ret = dmar_hotplug_insert(tmp); else - ret = dmar_hotplug_remove(handle); + ret = dmar_hotplug_remove(tmp); up_write(&dmar_global_lock); return ret; -- cgit v0.10.2 From a7a3dad944344caf034699b0c0e8dc51b469cf20 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Sun, 9 Nov 2014 22:48:00 +0800 Subject: iommu/vt-d: Enhance intel_irq_remapping driver to support DMAR unit hotplug Implement required callback functions for intel_irq_remapping driver to support DMAR unit hotplug. Signed-off-by: Jiang Liu Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 1cbdb50..7af0b56 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -36,7 +36,6 @@ struct hpet_scope { static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; static struct hpet_scope ir_hpet[MAX_HPET_TBS]; -static int ir_ioapic_num, ir_hpet_num; /* * Lock ordering: @@ -206,7 +205,7 @@ static struct intel_iommu *map_hpet_to_ir(u8 hpet_id) int i; for (i = 0; i < MAX_HPET_TBS; i++) - if (ir_hpet[i].id == hpet_id) + if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu) return ir_hpet[i].iommu; return NULL; } @@ -216,7 +215,7 @@ static struct intel_iommu *map_ioapic_to_ir(int apic) int i; for (i = 0; i < MAX_IO_APICS; i++) - if (ir_ioapic[i].id == apic) + if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu) return ir_ioapic[i].iommu; return NULL; } @@ -325,7 +324,7 @@ static int set_ioapic_sid(struct irte *irte, int apic) down_read(&dmar_global_lock); for (i = 0; i < MAX_IO_APICS; i++) { - if (ir_ioapic[i].id == apic) { + if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) { sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; break; } @@ -352,7 +351,7 @@ static int set_hpet_sid(struct irte *irte, u8 id) down_read(&dmar_global_lock); for (i = 0; i < MAX_HPET_TBS; i++) { - if (ir_hpet[i].id == id) { + if (ir_hpet[i].iommu && ir_hpet[i].id == id) { sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; break; } @@ -473,17 +472,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode) raw_spin_unlock_irqrestore(&iommu->register_lock, flags); } - -static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode) +static int intel_setup_irq_remapping(struct intel_iommu *iommu) { struct ir_table *ir_table; struct page *pages; unsigned long *bitmap; - ir_table = iommu->ir_table 
= kzalloc(sizeof(struct ir_table), - GFP_ATOMIC); + if (iommu->ir_table) + return 0; - if (!iommu->ir_table) + ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC); + if (!ir_table) return -ENOMEM; pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, @@ -492,24 +491,37 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode) if (!pages) { pr_err("IR%d: failed to allocate pages of order %d\n", iommu->seq_id, INTR_REMAP_PAGE_ORDER); - kfree(iommu->ir_table); - return -ENOMEM; + goto out_free_table; } bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES), sizeof(long), GFP_ATOMIC); if (bitmap == NULL) { pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); - __free_pages(pages, INTR_REMAP_PAGE_ORDER); - kfree(ir_table); - return -ENOMEM; + goto out_free_pages; } ir_table->base = page_address(pages); ir_table->bitmap = bitmap; - - iommu_set_irq_remapping(iommu, mode); + iommu->ir_table = ir_table; return 0; + +out_free_pages: + __free_pages(pages, INTR_REMAP_PAGE_ORDER); +out_free_table: + kfree(ir_table); + return -ENOMEM; +} + +static void intel_teardown_irq_remapping(struct intel_iommu *iommu) +{ + if (iommu && iommu->ir_table) { + free_pages((unsigned long)iommu->ir_table->base, + INTR_REMAP_PAGE_ORDER); + kfree(iommu->ir_table->bitmap); + kfree(iommu->ir_table); + iommu->ir_table = NULL; + } } /* @@ -666,9 +678,10 @@ static int __init intel_enable_irq_remapping(void) if (!ecap_ir_support(iommu->ecap)) continue; - if (intel_setup_irq_remapping(iommu, eim)) + if (intel_setup_irq_remapping(iommu)) goto error; + iommu_set_irq_remapping(iommu, eim); setup = 1; } @@ -699,12 +712,13 @@ error: return -1; } -static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, - struct intel_iommu *iommu) +static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, + struct intel_iommu *iommu, + struct acpi_dmar_hardware_unit *drhd) { struct acpi_dmar_pci_path *path; u8 bus; - int count; + int count, free = -1; bus = scope->bus; path = (struct acpi_dmar_pci_path *)(scope + 1); @@ -720,19 +734,36 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, PCI_SECONDARY_BUS); path++; } - ir_hpet[ir_hpet_num].bus = bus; - ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function); - ir_hpet[ir_hpet_num].iommu = iommu; - ir_hpet[ir_hpet_num].id = scope->enumeration_id; - ir_hpet_num++; + + for (count = 0; count < MAX_HPET_TBS; count++) { + if (ir_hpet[count].iommu == iommu && + ir_hpet[count].id == scope->enumeration_id) + return 0; + else if (ir_hpet[count].iommu == NULL && free == -1) + free = count; + } + if (free == -1) { + pr_warn("Exceeded Max HPET blocks\n"); + return -ENOSPC; + } + + ir_hpet[free].iommu = iommu; + ir_hpet[free].id = scope->enumeration_id; + ir_hpet[free].bus = bus; + ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function); + pr_info("HPET id %d under DRHD base 0x%Lx\n", + scope->enumeration_id, drhd->address); + + return 0; } -static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, - struct intel_iommu *iommu) +static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, + struct intel_iommu *iommu, + struct acpi_dmar_hardware_unit *drhd) { struct acpi_dmar_pci_path *path; u8 bus; - int count; + int count, free = -1; bus = scope->bus; path = (struct acpi_dmar_pci_path *)(scope + 1); @@ -749,54 +780,63 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, path++; } - ir_ioapic[ir_ioapic_num].bus = bus; - 
ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function); - ir_ioapic[ir_ioapic_num].iommu = iommu; - ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; - ir_ioapic_num++; + for (count = 0; count < MAX_IO_APICS; count++) { + if (ir_ioapic[count].iommu == iommu && + ir_ioapic[count].id == scope->enumeration_id) + return 0; + else if (ir_ioapic[count].iommu == NULL && free == -1) + free = count; + } + if (free == -1) { + pr_warn("Exceeded Max IO APICS\n"); + return -ENOSPC; + } + + ir_ioapic[free].bus = bus; + ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function); + ir_ioapic[free].iommu = iommu; + ir_ioapic[free].id = scope->enumeration_id; + pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n", + scope->enumeration_id, drhd->address, iommu->seq_id); + + return 0; } static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header, struct intel_iommu *iommu) { + int ret = 0; struct acpi_dmar_hardware_unit *drhd; struct acpi_dmar_device_scope *scope; void *start, *end; drhd = (struct acpi_dmar_hardware_unit *)header; - start = (void *)(drhd + 1); end = ((void *)drhd) + header->length; - while (start < end) { + while (start < end && ret == 0) { scope = start; - if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) { - if (ir_ioapic_num == MAX_IO_APICS) { - printk(KERN_WARNING "Exceeded Max IO APICS\n"); - return -1; - } - - printk(KERN_INFO "IOAPIC id %d under DRHD base " - " 0x%Lx IOMMU %d\n", scope->enumeration_id, - drhd->address, iommu->seq_id); + if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) + ret = ir_parse_one_ioapic_scope(scope, iommu, drhd); + else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) + ret = ir_parse_one_hpet_scope(scope, iommu, drhd); + start += scope->length; + } - ir_parse_one_ioapic_scope(scope, iommu); - } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) { - if (ir_hpet_num == MAX_HPET_TBS) { - printk(KERN_WARNING "Exceeded Max HPET blocks\n"); - return -1; - } + return ret; +} - printk(KERN_INFO "HPET id %d under DRHD base" - " 0x%Lx\n", scope->enumeration_id, - drhd->address); +static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu) +{ + int i; - ir_parse_one_hpet_scope(scope, iommu); - } - start += scope->length; - } + for (i = 0; i < MAX_HPET_TBS; i++) + if (ir_hpet[i].iommu == iommu) + ir_hpet[i].iommu = NULL; - return 0; + for (i = 0; i < MAX_IO_APICS; i++) + if (ir_ioapic[i].iommu == iommu) + ir_ioapic[i].iommu = NULL; } /* @@ -1172,7 +1212,85 @@ struct irq_remap_ops intel_irq_remap_ops = { .alloc_hpet_msi = intel_alloc_hpet_msi, }; +/* + * Support of Interrupt Remapping Unit Hotplug + */ +static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu) +{ + int ret; + int eim = x2apic_enabled(); + + if (eim && !ecap_eim_support(iommu->ecap)) { + pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n", + iommu->reg_phys, iommu->ecap); + return -ENODEV; + } + + if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) { + pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n", + iommu->reg_phys); + return -ENODEV; + } + + /* TODO: check all IOAPICs are covered by IOMMU */ + + /* Setup Interrupt-remapping now. */ + ret = intel_setup_irq_remapping(iommu); + if (ret) { + pr_err("DRHD %Lx: failed to allocate resource\n", + iommu->reg_phys); + ir_remove_ioapic_hpet_scope(iommu); + return ret; + } + + if (!iommu->qi) { + /* Clear previous faults. 
*/ + dmar_fault(-1, iommu); + iommu_disable_irq_remapping(iommu); + dmar_disable_qi(iommu); + } + + /* Enable queued invalidation */ + ret = dmar_enable_qi(iommu); + if (!ret) { + iommu_set_irq_remapping(iommu, eim); + } else { + pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n", + iommu->reg_phys, iommu->ecap, ret); + intel_teardown_irq_remapping(iommu); + ir_remove_ioapic_hpet_scope(iommu); + } + + return ret; +} + int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) { - return irq_remapping_enabled ? -ENOSYS : 0; + int ret = 0; + struct intel_iommu *iommu = dmaru->iommu; + + if (!irq_remapping_enabled) + return 0; + if (iommu == NULL) + return -EINVAL; + if (!ecap_ir_support(iommu->ecap)) + return 0; + + if (insert) { + if (!iommu->ir_table) + ret = dmar_ir_add(dmaru, iommu); + } else { + if (iommu->ir_table) { + if (!bitmap_empty(iommu->ir_table->bitmap, + INTR_REMAP_TABLE_ENTRIES)) { + ret = -EBUSY; + } else { + iommu_disable_irq_remapping(iommu); + intel_teardown_irq_remapping(iommu); + ir_remove_ioapic_hpet_scope(iommu); + } + } + } + + return ret; } -- cgit v0.10.2 From 51acce33c4df6ee23b5ad4c2e6c239e0d6f25771 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Sun, 9 Nov 2014 22:48:01 +0800 Subject: iommu/vt-d: Enhance error recovery in function intel_enable_irq_remapping() Enhance error recovery in function intel_enable_irq_remapping() by tearing down all created data structures. Signed-off-by: Jiang Liu Reviewed-by: Yijing Wang Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 7af0b56..27541d4 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -702,9 +702,11 @@ static int __init intel_enable_irq_remapping(void) return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE; error: - /* - * handle error condition gracefully here! - */ + for_each_iommu(iommu, drhd) + if (ecap_ir_support(iommu->ecap)) { + iommu_disable_irq_remapping(iommu); + intel_teardown_irq_remapping(iommu); + } if (x2apic_present) pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); -- cgit v0.10.2 From ffebeb46dd34736c90ffbca1ccb0bef8f4827c44 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Sun, 9 Nov 2014 22:48:02 +0800 Subject: iommu/vt-d: Enhance intel-iommu driver to support DMAR unit hotplug Implement required callback functions for intel-iommu driver to support DMAR unit hotplug. 
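In outline, hot-adding one unit replays the per-IOMMU steps of boot-time init_dmars(); below is a condensed sketch of the sequence the diff implements (not the literal patch: capability checks, flush-buffer handling and error unwinding are trimmed):

	static int intel_iommu_add_sketch(struct dmar_drhd_unit *dmaru)
	{
		struct intel_iommu *iommu = dmaru->iommu;

		/* refuse units that would weaken feature levels negotiated at boot */
		if (hw_pass_through && !ecap_pass_through(iommu->ecap))
			return -ENXIO;

		/* per-unit software state: domain ids and the root table */
		if (iommu_init_domains(iommu) || iommu_alloc_root_entry(iommu))
			return -ENOMEM;

		/* bring the hardware up: invalidation, fault irq, translation */
		intel_iommu_init_qi(iommu);
		if (dmar_set_interrupt(iommu))
			return -ENODEV;
		iommu_set_root_entry(iommu);
		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		iommu_enable_translation(iommu);
		return 0;
	}

Hot-removal is the inverse and reuses the disable_dmar_iommu()/free_dmar_iommu() split introduced at the top of this patch.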
Signed-off-by: Jiang Liu Reviewed-by: Yijing Wang Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 7c49ab5..99bf651 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1127,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) unsigned long flags; root = (struct root_entry *)alloc_pgtable_page(iommu->node); - if (!root) + if (!root) { + pr_err("IOMMU: allocating root entry for %s failed\n", + iommu->name); return -ENOMEM; + } __iommu_flush_cache(iommu, root, ROOT_SIZE); @@ -1468,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu) return 0; } -static void free_dmar_iommu(struct intel_iommu *iommu) +static void disable_dmar_iommu(struct intel_iommu *iommu) { struct dmar_domain *domain; int i; @@ -1492,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu) if (iommu->gcmd & DMA_GCMD_TE) iommu_disable_translation(iommu); +} - kfree(iommu->domains); - kfree(iommu->domain_ids); - iommu->domains = NULL; - iommu->domain_ids = NULL; +static void free_dmar_iommu(struct intel_iommu *iommu) +{ + if ((iommu->domains) && (iommu->domain_ids)) { + kfree(iommu->domains); + kfree(iommu->domain_ids); + iommu->domains = NULL; + iommu->domain_ids = NULL; + } g_iommus[iommu->seq_id] = NULL; @@ -2703,6 +2711,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw) return 0; } +static void intel_iommu_init_qi(struct intel_iommu *iommu) +{ + /* + * Start from the sane iommu hardware state. + * If the queued invalidation is already initialized by us + * (for example, while enabling interrupt-remapping) then + * we got the things already rolling from a sane state. + */ + if (!iommu->qi) { + /* + * Clear any previous faults. + */ + dmar_fault(-1, iommu); + /* + * Disable queued invalidation if supported and already enabled + * before OS handover. + */ + dmar_disable_qi(iommu); + } + + if (dmar_enable_qi(iommu)) { + /* + * Queued Invalidate not enabled, use Register Based Invalidate + */ + iommu->flush.flush_context = __iommu_flush_context; + iommu->flush.flush_iotlb = __iommu_flush_iotlb; + pr_info("IOMMU: %s using Register based invalidation\n", + iommu->name); + } else { + iommu->flush.flush_context = qi_flush_context; + iommu->flush.flush_iotlb = qi_flush_iotlb; + pr_info("IOMMU: %s using Queued invalidation\n", iommu->name); + } +} + static int __init init_dmars(void) { struct dmar_drhd_unit *drhd; @@ -2731,6 +2774,10 @@ static int __init init_dmars(void) DMAR_UNITS_SUPPORTED); } + /* Preallocate enough resources for IOMMU hot-addition */ + if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) + g_num_of_iommus = DMAR_UNITS_SUPPORTED; + g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *), GFP_KERNEL); if (!g_iommus) { @@ -2759,58 +2806,14 @@ static int __init init_dmars(void) * among all IOMMU's. Need to Split it later. */ ret = iommu_alloc_root_entry(iommu); - if (ret) { - printk(KERN_ERR "IOMMU: allocate root entry failed\n"); + if (ret) goto free_iommu; - } if (!ecap_pass_through(iommu->ecap)) hw_pass_through = 0; } - /* - * Start from the sane iommu hardware state. - */ - for_each_active_iommu(iommu, drhd) { - /* - * If the queued invalidation is already initialized by us - * (for example, while enabling interrupt-remapping) then - * we got the things already rolling from a sane state. - */ - if (iommu->qi) - continue; - - /* - * Clear any previous faults. 
- */ - dmar_fault(-1, iommu); - /* - * Disable queued invalidation if supported and already enabled - * before OS handover. - */ - dmar_disable_qi(iommu); - } - - for_each_active_iommu(iommu, drhd) { - if (dmar_enable_qi(iommu)) { - /* - * Queued Invalidate not enabled, use Register Based - * Invalidate - */ - iommu->flush.flush_context = __iommu_flush_context; - iommu->flush.flush_iotlb = __iommu_flush_iotlb; - printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based " - "invalidation\n", - iommu->seq_id, - (unsigned long long)drhd->reg_base_addr); - } else { - iommu->flush.flush_context = qi_flush_context; - iommu->flush.flush_iotlb = qi_flush_iotlb; - printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued " - "invalidation\n", - iommu->seq_id, - (unsigned long long)drhd->reg_base_addr); - } - } + for_each_active_iommu(iommu, drhd) + intel_iommu_init_qi(iommu); if (iommu_pass_through) iommu_identity_mapping |= IDENTMAP_ALL; @@ -2896,8 +2899,10 @@ static int __init init_dmars(void) return 0; free_iommu: - for_each_active_iommu(iommu, drhd) + for_each_active_iommu(iommu, drhd) { + disable_dmar_iommu(iommu); free_dmar_iommu(iommu); + } kfree(deferred_flush); free_g_iommus: kfree(g_iommus); @@ -3803,9 +3808,100 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg) return 0; } +static int intel_iommu_add(struct dmar_drhd_unit *dmaru) +{ + int sp, ret = 0; + struct intel_iommu *iommu = dmaru->iommu; + + if (g_iommus[iommu->seq_id]) + return 0; + + if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { + pr_warn("IOMMU: %s doesn't support hardware pass through.\n", + iommu->name); + return -ENXIO; + } + if (!ecap_sc_support(iommu->ecap) && + domain_update_iommu_snooping(iommu)) { + pr_warn("IOMMU: %s doesn't support snooping.\n", + iommu->name); + return -ENXIO; + } + sp = domain_update_iommu_superpage(iommu) - 1; + if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { + pr_warn("IOMMU: %s doesn't support large page.\n", + iommu->name); + return -ENXIO; + } + + /* + * Disable translation if already enabled prior to OS handover. + */ + if (iommu->gcmd & DMA_GCMD_TE) + iommu_disable_translation(iommu); + + g_iommus[iommu->seq_id] = iommu; + ret = iommu_init_domains(iommu); + if (ret == 0) + ret = iommu_alloc_root_entry(iommu); + if (ret) + goto out; + + if (dmaru->ignored) { + /* + * we always have to disable PMRs or DMA may fail on this device + */ + if (force_on) + iommu_disable_protect_mem_regions(iommu); + return 0; + } + + intel_iommu_init_qi(iommu); + iommu_flush_write_buffer(iommu); + ret = dmar_set_interrupt(iommu); + if (ret) + goto disable_iommu; + + iommu_set_root_entry(iommu); + iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); + iommu_enable_translation(iommu); + + if (si_domain) { + ret = iommu_attach_domain(si_domain, iommu); + if (ret < 0 || si_domain->id != ret) + goto disable_iommu; + domain_attach_iommu(si_domain, iommu); + } + + iommu_disable_protect_mem_regions(iommu); + return 0; + +disable_iommu: + disable_dmar_iommu(iommu); +out: + free_dmar_iommu(iommu); + return ret; +} + int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert) { - return intel_iommu_enabled ? 
-ENOSYS : 0;
+	int ret = 0;
+	struct intel_iommu *iommu = dmaru->iommu;
+
+	if (!intel_iommu_enabled)
+		return 0;
+	if (iommu == NULL)
+		return -EINVAL;
+
+	if (insert) {
+		ret = intel_iommu_add(dmaru);
+	} else {
+		disable_dmar_iommu(iommu);
+		free_dmar_iommu(iommu);
+	}
+
+	return ret;
 }

 static void intel_iommu_free_dmars(void)

-- cgit v0.10.2

From 864b94adfcba752aa902ee34497bbe58b97aa8d3 Mon Sep 17 00:00:00 2001
From: Jiang Liu
Date: Sun, 9 Nov 2014 22:48:03 +0800
Subject: pci, ACPI, iommu: Enhance pci_root to support DMAR device hotplug

Finally, enhance the pci_root driver to support DMAR device hotplug when hot-plugging PCI host bridges.

Signed-off-by: Jiang Liu
Reviewed-by: Yijing Wang
Acked-by: Bjorn Helgaas
Signed-off-by: Joerg Roedel

diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index cd4de7e..c6bcb8c 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,6 +33,7 @@
 #include <linux/pci.h>
 #include <linux/pci-acpi.h>
 #include <linux/pci-aspm.h>
+#include <linux/dmar.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <linux/dmi.h>
@@ -525,6 +526,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
 	struct acpi_pci_root *root;
 	acpi_handle handle = device->handle;
 	int no_aspm = 0, clear_aspm = 0;
+	bool hotadd = system_state != SYSTEM_BOOTING;

 	root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
 	if (!root)
@@ -571,6 +573,11 @@ static int acpi_pci_root_add(struct acpi_device *device,
 	strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
 	device->driver_data = root;

+	if (hotadd && dmar_device_add(handle)) {
+		result = -ENXIO;
+		goto end;
+	}
+
 	pr_info(PREFIX "%s [%s] (domain %04x %pR)\n",
 	       acpi_device_name(device), acpi_device_bid(device),
 	       root->segment, &root->secondary);
@@ -597,7 +604,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
 			root->segment, (unsigned int)root->secondary.start);
 		device->driver_data = NULL;
 		result = -ENODEV;
-		goto end;
+		goto remove_dmar;
 	}

 	if (clear_aspm) {
@@ -611,7 +618,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
 	if (device->wakeup.flags.run_wake)
 		device_set_run_wake(root->bus->bridge, true);

-	if (system_state != SYSTEM_BOOTING) {
+	if (hotadd) {
 		pcibios_resource_survey_bus(root->bus);
 		pci_assign_unassigned_root_bus_resources(root->bus);
 	}
@@ -621,6 +628,9 @@ static int acpi_pci_root_add(struct acpi_device *device,
 	pci_unlock_rescan_remove();
 	return 1;

+remove_dmar:
+	if (hotadd)
+		dmar_device_remove(handle);
 end:
 	kfree(root);
 	return result;
@@ -639,6 +649,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)

 	pci_remove_root_bus(root->bus);

+	dmar_device_remove(device->handle);
+
 	pci_unlock_rescan_remove();
 	kfree(root);

-- cgit v0.10.2

From cc4f14aa170d895c9a43bdb56f62070c8a6da908 Mon Sep 17 00:00:00 2001
From: Jiang Liu
Date: Wed, 26 Nov 2014 09:42:10 +0800
Subject: iommu/vt-d: Fix an off-by-one bug in __domain_mapping()

There's an off-by-one bug in function __domain_mapping(), which may trigger the BUG_ON(nr_pages < lvl_pages) when ((nr_pages + 1) & superpage_mask) == 0. The issue was introduced by commit 9051aa0268dc "intel-iommu: Combine domain_pfn_mapping() and domain_sg_mapping()", which sets sg_res to "nr_pages + 1" to avoid some of the 'sg_res==0' code paths. For example, a 511-page request with 2MB-aligned pfns yields sg_res = 512, so a 2MB superpage (lvl_pages = 512) gets selected even though only 511 pages remain, and the BUG_ON fires. It's safe to remove the extra "+1" because sg_res is only used to calculate the page size now.
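A tiny self-contained check of that arithmetic (hypothetical values: 4KB base pages, so one 2MB superpage covers 512 of them):

	#include <assert.h>

	int main(void)
	{
		unsigned long nr_pages = 511;		/* pages actually requested */
		unsigned long lvl_pages = 512;		/* pages per 2MB superpage */
		unsigned long sg_res = nr_pages + 1;	/* the old initialisation */

		/* sg_res looks superpage-sized, so a 2MB mapping gets chosen ... */
		assert((sg_res & (lvl_pages - 1)) == 0);
		/* ... but fewer pages than that remain, the BUG_ON condition */
		assert(nr_pages < lvl_pages);
		return 0;
	}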
Reported-And-Tested-by: Sudeep Dutt Signed-off-by: Jiang Liu Cc: # >= 3.0 Acked-By: David Woodhouse Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 99bf651..fe6830a 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1986,7 +1986,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, { struct dma_pte *first_pte = NULL, *pte = NULL; phys_addr_t uninitialized_var(pteval); - unsigned long sg_res; + unsigned long sg_res = 0; unsigned int largepage_lvl = 0; unsigned long lvl_pages = 0; @@ -1997,10 +1997,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; - if (sg) - sg_res = 0; - else { - sg_res = nr_pages + 1; + if (!sg) { + sg_res = nr_pages; pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; } -- cgit v0.10.2 From 18f23409909a9547ac7c149013286f36fcffa433 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 25 Nov 2014 17:50:55 +0000 Subject: iommu: Decouple iommu_map_sg from CPU page size If the IOMMU supports pages smaller than the CPU page size, segments which lie at offsets within the CPU page may be mapped based on the finer-grained IOMMU page boundaries. This minimises the amount of non-buffer memory between the CPU page boundary and the start of the segment which must be mapped and therefore exposed to the device, and brings the default iommu_map_sg implementation in line with iommu_map/unmap with respect to alignment. Signed-off-by: Robin Murphy Signed-off-by: Joerg Roedel diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 02e4313..1bd6335 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1143,14 +1143,24 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, { struct scatterlist *s; size_t mapped = 0; - unsigned int i; + unsigned int i, min_pagesz; int ret; - for_each_sg(sg, s, nents, i) { - phys_addr_t phys = page_to_phys(sg_page(s)); + if (unlikely(domain->ops->pgsize_bitmap == 0UL)) + return 0; - /* We are mapping on page boundarys, so offset must be 0 */ - if (s->offset) + min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); + + for_each_sg(sg, s, nents, i) { + phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; + + /* + * We are mapping on IOMMU page boundaries, so offset within + * the page must be 0. However, the IOMMU may support pages + * smaller than PAGE_SIZE, so s->offset may still represent + * an offset of that boundary within the CPU page. + */ + if (!IS_ALIGNED(s->offset, min_pagesz)) goto out_err; ret = iommu_map(domain, iova + mapped, phys, s->length, prot); -- cgit v0.10.2
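To close with an illustration of the new check: the minimum IOMMU page size is the lowest set bit of pgsize_bitmap, and a segment offset need only align to that, not to the CPU page size. A stand-alone sketch with a hypothetical bitmap (the old code rejected any non-zero s->offset; the new check accepts an IOMMU-page-aligned offset, e.g. 0x1000 inside a 64KB CPU page):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical IOMMU supporting 4KB, 2MB and 1GB pages */
		unsigned long pgsize_bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);
		unsigned long min_pagesz = pgsize_bitmap & -pgsize_bitmap; /* 1 << __ffs */
		unsigned long offset = 0x1000;	/* interior of a 64KB CPU page */

		printf("min page size: %lu\n", min_pagesz);		/* 4096 */
		printf("offset %#lx allowed: %d\n", offset,
		       (offset & (min_pagesz - 1)) == 0);		/* 1 */
		return 0;
	}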