author     Xiantao Zhang <xiantao.zhang@intel.com>    2008-09-28 08:39:46 (GMT)
committer  Avi Kivity <avi@redhat.com>                2008-10-15 12:25:38 (GMT)
commit     b010eb5103cfbe12ae6f08a4cdb3a748bf78c410 (patch)
tree       e2b812000bbb1b13edb52667e42a8d04d4ad02e5 /arch/ia64
parent     1cbea809c400661eecb538e0dd0bc4f3660f0a35 (diff)
download   linux-b010eb5103cfbe12ae6f08a4cdb3a748bf78c410.tar.xz
KVM: ia64: add directed mmio range support for kvm guests
Using VT-d, KVM guests can be assigned physical devices, so this patch introduces a new mmio type (directed mmio) to handle their mmio accesses.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
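In outline, the patch routes a guest access one of three ways, keyed off the type bits stored in the per-guest P2M table. A minimal sketch of that dispatch, using only the GPFN constants this patch touches (the function and enum names are hypothetical, not kernel code):

#include <stdint.h>

#define GPFN_PHYS_MMIO  (7UL << 60)     /* directed MMIO range */
#define GPFN_IO_MASK    (7UL << 60)     /* guest pfn is I/O type */

enum access_path { MAP_AS_RAM, MAP_AS_DEVICE, EMULATE_MMIO };

static enum access_path route(uint64_t p2m_entry)
{
        uint64_t type = p2m_entry & GPFN_IO_MASK;

        if (!type)
                return MAP_AS_RAM;      /* normal memory: map write-back */
        if (type == GPFN_PHYS_MMIO)
                return MAP_AS_DEVICE;   /* assigned device: map uncacheable */
        return EMULATE_MMIO;            /* emulated I/O: exit to the device model */
}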
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/include/asm/kvm_host.h |  2
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c         |  4
-rw-r--r--  arch/ia64/kvm/vcpu.h             | 26
-rw-r--r--  arch/ia64/kvm/vtlb.c             | 23
4 files changed, 33 insertions, 22 deletions
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index da579a3..85db124 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -132,7 +132,7 @@
#define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */
#define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */
#define GPFN_GFW (6UL << 60) /* Guest Firmware */
-#define GPFN_HIGH_MMIO (7UL << 60) /* High MMIO range */
+#define GPFN_PHYS_MMIO (7UL << 60) /* Directed MMIO Range */
#define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */
#define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */
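As the masks show, the guest-pfn type lives in bits 60-62 of a P2M entry and bit 63 marks the entry invalid, so a directed-MMIO entry is type 7 plus the machine address bits. A standalone sanity check, assuming only the constants above (the example entry value is made up):

#include <assert.h>
#include <stdint.h>

#define GPFN_PHYS_MMIO  (7UL << 60)
#define GPFN_IO_MASK    (7UL << 60)
#define GPFN_INV_MASK   (1UL << 63)

int main(void)
{
        uint64_t entry = GPFN_PHYS_MMIO | 0xabc000;        /* hypothetical entry */

        assert(!(entry & GPFN_INV_MASK));                  /* still a valid entry */
        assert((entry & GPFN_IO_MASK) == GPFN_PHYS_MMIO);  /* classified as directed MMIO */
        return 0;
}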
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 800a4f2..3df82f3 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1447,11 +1447,11 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
if (!kvm_is_mmio_pfn(pfn)) {
kvm_set_pmt_entry(kvm, base_gfn + i,
pfn << PAGE_SHIFT,
- _PAGE_MA_WB);
+ _PAGE_AR_RWX | _PAGE_MA_WB);
memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
} else {
kvm_set_pmt_entry(kvm, base_gfn + i,
- GPFN_LOW_MMIO | (pfn << PAGE_SHIFT),
+ GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
_PAGE_MA_UC);
memslot->rmap[i] = 0;
}
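The memslot path above now gives ordinary RAM explicit RWX rights with write-back caching, while an assigned device's frames are tagged GPFN_PHYS_MMIO and mapped uncacheable. A simplified sketch of the per-frame decision; kvm_set_pmt_entry and the _PAGE_* macros are the kernel's, taken as given, and the wrapper itself is purely illustrative:

/* Illustrative wrapper around the branch above, one guest frame at a time. */
static void install_frame(struct kvm *kvm, u64 gfn, u64 pfn, bool is_mmio)
{
        if (!is_mmio)
                kvm_set_pmt_entry(kvm, gfn, pfn << PAGE_SHIFT,
                                  _PAGE_AR_RWX | _PAGE_MA_WB);
        else
                kvm_set_pmt_entry(kvm, gfn,
                                  GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
                                  _PAGE_MA_UC);
}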
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index b0fcfb6..341e3fe 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -313,21 +313,21 @@ static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
trp->rid = rid;
}
-extern u64 kvm_lookup_mpa(u64 gpfn);
-extern u64 kvm_gpa_to_mpa(u64 gpa);
-
-/* Return I/O type if trye */
-#define __gpfn_is_io(gpfn) \
- ({ \
- u64 pte, ret = 0; \
- pte = kvm_lookup_mpa(gpfn); \
- if (!(pte & GPFN_INV_MASK)) \
- ret = pte & GPFN_IO_MASK; \
- ret; \
- })
+extern u64 kvm_get_mpt_entry(u64 gpfn);
+/* Return the I/O type if gpfn is an I/O page; directed MMIO yields 0 */
+static inline u64 __gpfn_is_io(u64 gpfn)
+{
+ u64 pte;
+ pte = kvm_get_mpt_entry(gpfn);
+ if (!(pte & GPFN_INV_MASK)) {
+ pte = pte & GPFN_IO_MASK;
+ if (pte != GPFN_PHYS_MMIO)
+ return pte;
+ }
+ return 0;
+}
#endif
-
#define IA64_NO_FAULT 0
#define IA64_FAULT 1
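The macro-to-inline rewrite also changes the contract: __gpfn_is_io still returns the I/O type for emulated ranges, but now returns 0 for directed MMIO, so callers treat pass-through pages like ordinary memory. A hedged caller fragment (handle_mmio and map_into_guest_tlb are hypothetical names, not kernel functions):

/* Sketch of a caller: only emulated I/O takes the mmio-emulation exit. */
u64 io_type = __gpfn_is_io(gpfn);
if (io_type)
        handle_mmio(vcpu, gpfn);        /* emulated: bounce to the device model */
else
        map_into_guest_tlb(vcpu, gpfn); /* RAM or directed MMIO: map directly */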
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index def4576..e22b933 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -390,7 +390,7 @@ void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
{
- u64 ps, ps_mask, paddr, maddr;
+ u64 ps, ps_mask, paddr, maddr, io_mask;
union pte_flags phy_pte;
ps = itir_ps(itir);
@@ -398,8 +398,9 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
phy_pte.val = *pte;
paddr = *pte;
paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
- maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
- if (maddr & GPFN_IO_MASK) {
+ maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
+ io_mask = maddr & GPFN_IO_MASK;
+ if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
*pte |= VTLB_PTE_IO;
return -1;
}
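translate_phy_pte now keeps the whole P2M entry rather than just the machine address, so it can separate the two I/O cases: only emulated I/O sets VTLB_PTE_IO and fails the translation, while directed MMIO falls through and is translated like RAM. The narrowed rejection test, exercised standalone as a sketch:

#include <assert.h>
#include <stdint.h>

#define GPFN_LEGACY_IO  (5UL << 60)
#define GPFN_PHYS_MMIO  (7UL << 60)
#define GPFN_IO_MASK    (7UL << 60)

/* Sketch: the condition translate_phy_pte now uses to reject a pte. */
static int rejects(uint64_t entry)
{
        uint64_t io_mask = entry & GPFN_IO_MASK;

        return io_mask && io_mask != GPFN_PHYS_MMIO;
}

int main(void)
{
        assert(rejects(GPFN_LEGACY_IO | 0x1000));   /* emulated I/O: trap */
        assert(!rejects(GPFN_PHYS_MMIO | 0x1000));  /* directed MMIO: map */
        assert(!rejects(0x1000));                   /* plain RAM: map */
        return 0;
}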
@@ -418,7 +419,7 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
u64 ifa, int type)
{
u64 ps;
- u64 phy_pte;
+ u64 phy_pte, io_mask, index;
union ia64_rr vrr, mrr;
int ret = 0;
@@ -426,13 +427,16 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
vrr.val = vcpu_get_rr(v, ifa);
mrr.val = ia64_get_rr(ifa);
+ index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
+ io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
phy_pte = translate_phy_pte(&pte, itir, ifa);
/* Ensure WB attribute if pte is related to a normal mem page,
* which is required by vga acceleration since qemu maps shared
* vram buffer with WB.
*/
- if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
+ if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
+ io_mask != GPFN_PHYS_MMIO) {
pte &= ~_PAGE_MA_MASK;
phy_pte &= ~_PAGE_MA_MASK;
}
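The extra io_mask term matters because the WB coercion exists only for emulated VGA memory that qemu maps write-back; forcing WB onto a real device's registers could let the CPU cache or combine accesses that must reach the hardware. The attribute choice, restated as a sketch with the kernel's macro values taken as given (on ia64, _PAGE_MA_WB is the all-zero MA encoding, which is why the patch simply clears the field):

/* Sketch: which memory attribute the inserted translation keeps. */
static u64 choose_attribute(u64 pte, u64 io_mask)
{
        if (!(pte & VTLB_PTE_IO) &&
            (pte & _PAGE_MA_MASK) != _PAGE_MA_NAT &&
            io_mask != GPFN_PHYS_MMIO)
                return _PAGE_MA_WB;          /* normal memory: force write-back */
        return pte & _PAGE_MA_MASK;          /* device or NaT page: keep attribute */
}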
@@ -566,12 +570,19 @@ void thash_init(struct thash_cb *hcb, u64 sz)
}
}
-u64 kvm_lookup_mpa(u64 gpfn)
+u64 kvm_get_mpt_entry(u64 gpfn)
{
u64 *base = (u64 *) KVM_P2M_BASE;
return *(base + gpfn);
}
+u64 kvm_lookup_mpa(u64 gpfn)
+{
+ u64 maddr;
+ maddr = kvm_get_mpt_entry(gpfn);
+ return maddr & _PAGE_PPN_MASK;
+}
+
u64 kvm_gpa_to_mpa(u64 gpa)
{
u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
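The refactoring splits the lookup in two: kvm_get_mpt_entry returns the raw P2M word with the type bits intact (for classification), while kvm_lookup_mpa masks it down to the machine ppn (for address formation). A self-contained sketch of that pairing; the page size and ppn mask are stated assumptions, not the kernel's exact values:

#include <stdint.h>

#define PAGE_SHIFT      14                        /* assumption: 16K pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PPN_MASK        0x0003fffffffff000UL      /* assumption: ppn bit field */

static uint64_t mpt[1024];                        /* stand-in for the P2M table */

static uint64_t get_mpt_entry(uint64_t gpfn)
{
        return mpt[gpfn];                         /* raw entry, type bits included */
}

static uint64_t lookup_mpa(uint64_t gpfn)
{
        return get_mpt_entry(gpfn) & PPN_MASK;    /* strip GPFN type bits */
}

static uint64_t gpa_to_mpa(uint64_t gpa)
{
        /* machine page base plus the offset within the page */
        return lookup_mpa(gpa >> PAGE_SHIFT) | (gpa & (PAGE_SIZE - 1));
}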