author     Ingo Molnar <mingo@elte.hu>	2009-06-17 10:52:15 (GMT)
committer  Ingo Molnar <mingo@elte.hu>	2009-06-17 10:56:49 (GMT)
commit     eadb8a091b27a840de7450f84ecff5ef13476424 (patch)
tree       58c3782d40def63baa8167f3d31e3048cb4c7660 /arch/ia64
parent     73874005cd8800440be4299bd095387fff4b90ac (diff)
parent     65795efbd380a832ae508b04dba8f8e53f0b84d9 (diff)
download   linux-fsl-qoriq-eadb8a091b27a840de7450f84ecff5ef13476424.tar.xz
Merge branch 'linus' into tracing/hw-breakpoints
Conflicts:
	arch/x86/Kconfig
	arch/x86/kernel/traps.c
	arch/x86/power/cpu.c
	arch/x86/power/cpu_32.c
	kernel/Makefile

Semantic conflict:
	arch/x86/kernel/hw_breakpoint.c

Merge reason: Resolve the conflicts, move from put_cpu_no_resched() to
put_cpu() in arch/x86/kernel/hw_breakpoint.c.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
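Note on the semantic conflict: put_cpu_no_resched() was removed upstream, so code that pinned a CPU with get_cpu() now releases it with plain put_cpu(), which may reschedule once preemption is re-enabled. A minimal sketch of the resulting pattern (hypothetical handler, not taken from this patch):

	static irqreturn_t example_handler(int irq, void *arg)
	{
		int cpu = get_cpu();	/* pin this CPU, disable preemption */

		/* ... per-CPU work on 'cpu' that must not migrate ... */

		put_cpu();		/* re-enable preemption; may reschedule */
		return IRQ_HANDLED;
	}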
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c        |   2
-rw-r--r--  arch/ia64/hp/sim/hpsim_irq.c           |   3
-rw-r--r--  arch/ia64/include/asm/atomic.h         |   2
-rw-r--r--  arch/ia64/include/asm/bitsperlong.h    |   8
-rw-r--r--  arch/ia64/include/asm/kmap_types.h     |  24
-rw-r--r--  arch/ia64/include/asm/kvm_host.h       |   6
-rw-r--r--  arch/ia64/include/asm/mman.h           |   2
-rw-r--r--  arch/ia64/include/asm/pgtable.h        |   2
-rw-r--r--  arch/ia64/include/asm/signal.h         |   2
-rw-r--r--  arch/ia64/include/asm/suspend.h        |   1
-rw-r--r--  arch/ia64/include/asm/types.h          |   7
-rw-r--r--  arch/ia64/kernel/acpi.c                |   5
-rw-r--r--  arch/ia64/kernel/init_task.c           |   4
-rw-r--r--  arch/ia64/kernel/iosapic.c             |  10
-rw-r--r--  arch/ia64/kernel/irq_ia64.c            |   3
-rw-r--r--  arch/ia64/kernel/mca.c                 |   3
-rw-r--r--  arch/ia64/kernel/msi_ia64.c            |  16
-rw-r--r--  arch/ia64/kernel/perfmon.c             |   2
-rw-r--r--  arch/ia64/kernel/uncached.c            |   3
-rw-r--r--  arch/ia64/kvm/Kconfig                  |   2
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c               | 263
-rw-r--r--  arch/ia64/kvm/kvm_fw.c                 |  28
-rw-r--r--  arch/ia64/kvm/lapic.h                  |   6
-rw-r--r--  arch/ia64/kvm/optvfault.S              |  30
-rw-r--r--  arch/ia64/kvm/process.c                |   5
-rw-r--r--  arch/ia64/kvm/vcpu.c                   |  20
-rw-r--r--  arch/ia64/kvm/vmm.c                    |  12
-rw-r--r--  arch/ia64/kvm/vmm_ivt.S                |  18
-rw-r--r--  arch/ia64/kvm/vtlb.c                   |   3
-rw-r--r--  arch/ia64/mm/extable.c                 |  28
-rw-r--r--  arch/ia64/sn/kernel/irq.c              |   4
-rw-r--r--  arch/ia64/sn/kernel/msi_sn.c           |   8
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c             |   3
33 files changed, 362 insertions(+), 173 deletions(-)
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 56ceb68..fe63b2dc9 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1131,7 +1131,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
#ifdef CONFIG_NUMA
{
struct page *page;
- page = alloc_pages_node(ioc->node == MAX_NUMNODES ?
+ page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ?
numa_node_id() : ioc->node, flags,
get_order(size));
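The alloc_pages_node() -> alloc_pages_exact_node() conversion above recurs in uncached.c and pci_dma.c below. A hedged sketch of the difference, assuming the caller resolves the node id itself (as sba_alloc_coherent does with its MAX_NUMNODES check; example_alloc is a hypothetical helper):

	/*
	 * alloc_pages_exact_node() expects a valid nid and skips the
	 * "nid < 0 means local node" fixup that alloc_pages_node()
	 * performs, so the caller resolves the node first.
	 */
	static struct page *example_alloc(int nid, gfp_t flags, unsigned int order)
	{
		if (nid == MAX_NUMNODES)
			nid = numa_node_id();
		return alloc_pages_exact_node(nid, flags, order);
	}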
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index cc0a318..acb5047 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -21,9 +21,10 @@ hpsim_irq_noop (unsigned int irq)
{
}
-static void
+static int
hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
{
+ return 0;
}
static struct hw_interrupt_type irq_type_hp_sim = {
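This void -> int conversion of the set_affinity hook repeats below for iosapic, the ia64 and DMAR MSI chips, and the SN2 handlers: the callback now reports whether the affinity change was applied. A sketch of the new contract with hypothetical names (0 on success, -1 on failure):

	static int example_set_affinity(unsigned int irq, const struct cpumask *mask)
	{
		int cpu = cpumask_first_and(cpu_online_mask, mask);

		if (cpu >= nr_cpu_ids)
			return -1;	/* no online CPU in the mask */
		/* ... retarget 'irq' to 'cpu' here ... */
		return 0;
	}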
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index d37292b..88405cb 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -216,5 +216,5 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
#endif /* _ASM_IA64_ATOMIC_H */
diff --git a/arch/ia64/include/asm/bitsperlong.h b/arch/ia64/include/asm/bitsperlong.h
new file mode 100644
index 0000000..ec4db3c
--- /dev/null
+++ b/arch/ia64/include/asm/bitsperlong.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_IA64_BITSPERLONG_H
+#define __ASM_IA64_BITSPERLONG_H
+
+#define __BITS_PER_LONG 64
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_IA64_BITSPERLONG_H */
diff --git a/arch/ia64/include/asm/kmap_types.h b/arch/ia64/include/asm/kmap_types.h
index 5d1658a..05d5f99 100644
--- a/arch/ia64/include/asm/kmap_types.h
+++ b/arch/ia64/include/asm/kmap_types.h
@@ -1,30 +1,12 @@
#ifndef _ASM_IA64_KMAP_TYPES_H
#define _ASM_IA64_KMAP_TYPES_H
-
#ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define __WITH_KM_FENCE
#endif
-enum km_type {
-D(0) KM_BOUNCE_READ,
-D(1) KM_SKB_SUNRPC_DATA,
-D(2) KM_SKB_DATA_SOFTIRQ,
-D(3) KM_USER0,
-D(4) KM_USER1,
-D(5) KM_BIO_SRC_IRQ,
-D(6) KM_BIO_DST_IRQ,
-D(7) KM_PTE0,
-D(8) KM_PTE1,
-D(9) KM_IRQ0,
-D(10) KM_IRQ1,
-D(11) KM_SOFTIRQ0,
-D(12) KM_SOFTIRQ1,
-D(13) KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
-#undef D
+#undef __WITH_KM_FENCE
#endif /* _ASM_IA64_KMAP_TYPES_H */
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 4542651..5f43697 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -371,6 +371,7 @@ struct kvm_vcpu_arch {
int last_run_cpu;
int vmm_tr_slot;
int vm_tr_slot;
+ int sn_rtc_tr_slot;
#define KVM_MP_STATE_RUNNABLE 0
#define KVM_MP_STATE_UNINITIALIZED 1
@@ -465,6 +466,7 @@ struct kvm_arch {
unsigned long vmm_init_rr;
int online_vcpus;
+ int is_sn2;
struct kvm_ioapic *vioapic;
struct kvm_vm_stat stat;
@@ -472,6 +474,7 @@ struct kvm_arch {
struct list_head assigned_dev_head;
struct iommu_domain *iommu_domain;
+ int iommu_flags;
struct hlist_head irq_ack_notifier_list;
unsigned long irq_sources_bitmap;
@@ -578,6 +581,8 @@ struct kvm_vmm_info{
kvm_vmm_entry *vmm_entry;
kvm_tramp_entry *tramp_entry;
unsigned long vmm_ivt;
+ unsigned long patch_mov_ar;
+ unsigned long patch_mov_ar_sn2;
};
int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
@@ -585,7 +590,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
void kvm_sal_emul(struct kvm_vcpu *vcpu);
-static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
#endif /* __ASSEMBLY__*/
#endif
diff --git a/arch/ia64/include/asm/mman.h b/arch/ia64/include/asm/mman.h
index c73b878..48cf8b9 100644
--- a/arch/ia64/include/asm/mman.h
+++ b/arch/ia64/include/asm/mman.h
@@ -8,7 +8,7 @@
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
-#include <asm-generic/mman.h>
+#include <asm-generic/mman-common.h>
#define MAP_GROWSDOWN 0x00100 /* stack-like segment */
#define MAP_GROWSUP 0x00200 /* register stack-like segment */
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 7a9bff4..0a9cc73 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -146,6 +146,8 @@
#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+#define PAGE_KERNEL_UC __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX | \
+ _PAGE_MA_UC)
# ifndef __ASSEMBLY__
diff --git a/arch/ia64/include/asm/signal.h b/arch/ia64/include/asm/signal.h
index 4f5ca56..b166248 100644
--- a/arch/ia64/include/asm/signal.h
+++ b/arch/ia64/include/asm/signal.h
@@ -114,7 +114,7 @@
#endif /* __KERNEL__ */
-#include <asm-generic/signal.h>
+#include <asm-generic/signal-defs.h>
# ifndef __ASSEMBLY__
diff --git a/arch/ia64/include/asm/suspend.h b/arch/ia64/include/asm/suspend.h
deleted file mode 100644
index b05bbb6..0000000
--- a/arch/ia64/include/asm/suspend.h
+++ /dev/null
@@ -1 +0,0 @@
-/* dummy (must be non-empty to prevent prejudicial removal...) */
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
index e36b371..fbf1ed3 100644
--- a/arch/ia64/include/asm/types.h
+++ b/arch/ia64/include/asm/types.h
@@ -19,10 +19,6 @@
# define __IA64_UL(x) (x)
# define __IA64_UL_CONST(x) x
-# ifdef __KERNEL__
-# define BITS_PER_LONG 64
-# endif
-
#else
# define __IA64_UL(x) ((unsigned long)(x))
# define __IA64_UL_CONST(x) x##UL
@@ -34,10 +30,7 @@ typedef unsigned int umode_t;
*/
# ifdef __KERNEL__
-#define BITS_PER_LONG 64
-
/* DMA addresses are 64-bits wide, in general. */
-
typedef u64 dma_addr_t;
# endif /* __KERNEL__ */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 5510317..baec6f0 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -636,7 +636,7 @@ void __init acpi_numa_arch_fixup(void)
* success: return IRQ number (>=0)
* failure: return < 0
*/
-int acpi_register_gsi(u32 gsi, int triggering, int polarity)
+int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity)
{
if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
return gsi;
@@ -678,7 +678,8 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
fadt = (struct acpi_table_fadt *)fadt_header;
- acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
+ acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE,
+ ACPI_ACTIVE_LOW);
return 0;
}
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index 5b0e830..c475fc2 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -19,10 +19,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial task structure.
*
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 166e0d8..f92cef4 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -329,7 +329,7 @@ unmask_irq (unsigned int irq)
}
-static void
+static int
iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
{
#ifdef CONFIG_SMP
@@ -343,15 +343,15 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
cpu = cpumask_first_and(cpu_online_mask, mask);
if (cpu >= nr_cpu_ids)
- return;
+ return -1;
if (irq_prepare_move(irq, cpu))
- return;
+ return -1;
dest = cpu_physical_id(cpu);
if (!iosapic_intr_info[irq].count)
- return; /* not an IOSAPIC interrupt */
+ return -1; /* not an IOSAPIC interrupt */
set_irq_affinity_info(irq, dest, redir);
@@ -376,7 +376,9 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
}
+
#endif
+ return 0;
}
/*
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index acc4d19..b448197 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -610,6 +610,9 @@ static struct irqaction ipi_irqaction = {
.name = "IPI"
};
+/*
+ * KVM uses this interrupt to force a cpu out of guest mode
+ */
static struct irqaction resched_irqaction = {
.handler = dummy_handler,
.flags = IRQF_DISABLED,
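The comment added above pairs with the reworked kvm_vcpu_kick() in kvm-ia64.c further down: KVM forces a VM exit by sending this otherwise-dummy reschedule IPI to the CPU running the vcpu. A condensed sketch of that mechanism, mirroring the kick code later in this diff:

	void example_kick(struct kvm_vcpu *vcpu)
	{
		int cpu = vcpu->cpu;
		int me = get_cpu();

		/*
		 * The IPI handler itself is a no-op; interrupting the
		 * target CPU and forcing it out of guest mode is the point.
		 */
		if (cpu != me && cpu_online(cpu) &&
		    !test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
			smp_send_reschedule(cpu);
		put_cpu();
	}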
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 8f33a88..5b17bd4 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1829,8 +1829,7 @@ ia64_mca_cpu_init(void *cpu_data)
data = mca_bootmem();
first_time = 0;
} else
- data = page_address(alloc_pages_node(numa_node_id(),
- GFP_KERNEL, get_order(sz)));
+ data = __get_free_pages(GFP_KERNEL, get_order(sz));
if (!data)
panic("Could not allocate MCA memory for cpu %d\n",
cpu);
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 2b15e23..0f8ade9 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -12,7 +12,7 @@
static struct irq_chip ia64_msi_chip;
#ifdef CONFIG_SMP
-static void ia64_set_msi_irq_affinity(unsigned int irq,
+static int ia64_set_msi_irq_affinity(unsigned int irq,
const cpumask_t *cpu_mask)
{
struct msi_msg msg;
@@ -20,10 +20,10 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
int cpu = first_cpu(*cpu_mask);
if (!cpu_online(cpu))
- return;
+ return -1;
if (irq_prepare_move(irq, cpu))
- return;
+ return -1;
read_msi_msg(irq, &msg);
@@ -39,6 +39,8 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
write_msi_msg(irq, &msg);
cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
+
+ return 0;
}
#endif /* CONFIG_SMP */
@@ -130,17 +132,17 @@ void arch_teardown_msi_irq(unsigned int irq)
#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
{
struct irq_cfg *cfg = irq_cfg + irq;
struct msi_msg msg;
int cpu = cpumask_first(mask);
if (!cpu_online(cpu))
- return;
+ return -1;
if (irq_prepare_move(irq, cpu))
- return;
+ return -1;
dmar_msi_read(irq, &msg);
@@ -151,6 +153,8 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
dmar_msi_write(irq, &msg);
cpumask_copy(irq_desc[irq].affinity, mask);
+
+ return 0;
}
#endif /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 8a06dc48..bdc176c 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5595,7 +5595,7 @@ pfm_interrupt_handler(int irq, void *arg)
(*pfm_alt_intr_handler->handler)(irq, arg, regs);
}
- put_cpu_no_resched();
+ put_cpu();
return IRQ_HANDLED;
}
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 8eff8c1..6ba72ab 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
/* attempt to allocate a granule's worth of cached memory pages */
- page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ page = alloc_pages_exact_node(nid,
+ GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
IA64_GRANULE_SHIFT-PAGE_SHIFT);
if (!page) {
mutex_unlock(&uc_pool->add_chunk_mutex);
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 0a2d6b8..64d5209 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -23,7 +23,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
- depends on HAVE_KVM && EXPERIMENTAL
+ depends on HAVE_KVM && MODULES && EXPERIMENTAL
# for device assignment:
depends on PCI
select PREEMPT_NOTIFIERS
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index d20a5db..80c57b0 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -41,6 +41,9 @@
#include <asm/div64.h>
#include <asm/tlb.h>
#include <asm/elf.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/clksupport.h>
+#include <asm/sn/shub_mmr.h>
#include "misc.h"
#include "vti.h"
@@ -65,6 +68,16 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
+static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+ if (vcpu->kvm->arch.is_sn2)
+ return rtc_time();
+ else
+#endif
+ return ia64_getreg(_IA64_REG_AR_ITC);
+}
+
static void kvm_flush_icache(unsigned long start, unsigned long len)
{
int l;
@@ -119,8 +132,7 @@ void kvm_arch_hardware_enable(void *garbage)
unsigned long saved_psr;
int slot;
- pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
- PAGE_KERNEL));
+ pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
local_irq_save(saved_psr);
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
@@ -283,6 +295,18 @@ static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
+static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
+{
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+ if (!test_and_set_bit(vector, &vpd->irr[0])) {
+ vcpu->arch.irq_new_pending = 1;
+ kvm_vcpu_kick(vcpu);
+ return 1;
+ }
+ return 0;
+}
+
/*
* offset: address offset to IPI space.
* value: deliver value.
@@ -292,20 +316,20 @@ static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
{
switch (dm) {
case SAPIC_FIXED:
- kvm_apic_set_irq(vcpu, vector, 0);
break;
case SAPIC_NMI:
- kvm_apic_set_irq(vcpu, 2, 0);
+ vector = 2;
break;
case SAPIC_EXTINT:
- kvm_apic_set_irq(vcpu, 0, 0);
+ vector = 0;
break;
case SAPIC_INIT:
case SAPIC_PMI:
default:
printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
- break;
+ return;
}
+ __apic_accept_irq(vcpu, vector);
}
static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
@@ -413,6 +437,23 @@ static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 1;
}
+static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
+{
+ unsigned long pte, rtc_phys_addr, map_addr;
+ int slot;
+
+ map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
+ rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
+ pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
+ slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
+ vcpu->arch.sn_rtc_tr_slot = slot;
+ if (slot < 0) {
+ printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
+ slot = 0;
+ }
+ return slot;
+}
+
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
@@ -426,7 +467,7 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
if (irqchip_in_kernel(vcpu->kvm)) {
- vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+ vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;
if (time_after(vcpu_now_itc, vpd->itm)) {
vcpu->arch.timer_check = 1;
@@ -447,10 +488,10 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
hrtimer_cancel(p_ht);
vcpu->arch.ht_active = 0;
- if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+ if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
+ kvm_cpu_has_pending_timer(vcpu))
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
- vcpu->arch.mp_state =
- KVM_MP_STATE_RUNNABLE;
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
return -EINTR;
@@ -551,22 +592,35 @@ static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
if (r < 0)
goto out;
vcpu->arch.vm_tr_slot = r;
+
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+ if (kvm->arch.is_sn2) {
+ r = kvm_sn2_setup_mappings(vcpu);
+ if (r < 0)
+ goto out;
+ }
+#endif
+
r = 0;
out:
return r;
-
}
static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
{
-
+ struct kvm *kvm = vcpu->kvm;
ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
-
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+ if (kvm->arch.is_sn2)
+ ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
+#endif
}
static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
{
+ unsigned long psr;
+ int r;
int cpu = smp_processor_id();
if (vcpu->arch.last_run_cpu != cpu ||
@@ -578,36 +632,27 @@ static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
vcpu->arch.host_rr6 = ia64_get_rr(RR6);
vti_set_rr6(vcpu->arch.vmm_rr);
- return kvm_insert_vmm_mapping(vcpu);
+ local_irq_save(psr);
+ r = kvm_insert_vmm_mapping(vcpu);
+ local_irq_restore(psr);
+ return r;
}
+
static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
{
kvm_purge_vmm_mapping(vcpu);
vti_set_rr6(vcpu->arch.host_rr6);
}
-static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
union context *host_ctx, *guest_ctx;
int r;
- /*Get host and guest context with guest address space.*/
- host_ctx = kvm_get_host_context(vcpu);
- guest_ctx = kvm_get_guest_context(vcpu);
-
- r = kvm_vcpu_pre_transition(vcpu);
- if (r < 0)
- goto out;
- kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
- kvm_vcpu_post_transition(vcpu);
- r = 0;
-out:
- return r;
-}
-
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- int r;
+ /*
+ * down_read() may sleep and return with interrupts enabled
+ */
+ down_read(&vcpu->kvm->slots_lock);
again:
if (signal_pending(current)) {
@@ -616,26 +661,31 @@ again:
goto out;
}
- /*
- * down_read() may sleep and return with interrupts enabled
- */
- down_read(&vcpu->kvm->slots_lock);
-
preempt_disable();
local_irq_disable();
- vcpu->guest_mode = 1;
+ /*Get host and guest context with guest address space.*/
+ host_ctx = kvm_get_host_context(vcpu);
+ guest_ctx = kvm_get_guest_context(vcpu);
+
+ clear_bit(KVM_REQ_KICK, &vcpu->requests);
+
+ r = kvm_vcpu_pre_transition(vcpu);
+ if (r < 0)
+ goto vcpu_run_fail;
+
+ up_read(&vcpu->kvm->slots_lock);
kvm_guest_enter();
- r = vti_vcpu_run(vcpu, kvm_run);
- if (r < 0) {
- local_irq_enable();
- preempt_enable();
- kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
- goto out;
- }
+
+ /*
+ * Transition to the guest
+ */
+ kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
+
+ kvm_vcpu_post_transition(vcpu);
vcpu->arch.launched = 1;
- vcpu->guest_mode = 0;
+ set_bit(KVM_REQ_KICK, &vcpu->requests);
local_irq_enable();
/*
@@ -646,9 +696,10 @@ again:
*/
barrier();
kvm_guest_exit();
- up_read(&vcpu->kvm->slots_lock);
preempt_enable();
+ down_read(&vcpu->kvm->slots_lock);
+
r = kvm_handle_exit(kvm_run, vcpu);
if (r > 0) {
@@ -657,12 +708,20 @@ again:
}
out:
+ up_read(&vcpu->kvm->slots_lock);
if (r > 0) {
kvm_resched(vcpu);
+ down_read(&vcpu->kvm->slots_lock);
goto again;
}
return r;
+
+vcpu_run_fail:
+ local_irq_enable();
+ preempt_enable();
+ kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ goto out;
}
static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
@@ -788,6 +847,9 @@ struct kvm *kvm_arch_create_vm(void)
if (IS_ERR(kvm))
return ERR_PTR(-ENOMEM);
+
+ kvm->arch.is_sn2 = ia64_platform_is("sn2");
+
kvm_init_vm(kvm);
kvm->arch.online_vcpus = 0;
@@ -884,7 +946,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
RESTORE_REGS(saved_gp);
vcpu->arch.irq_new_pending = 1;
- vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
+ vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
set_bit(KVM_REQ_RESUME, &vcpu->requests);
vcpu_put(vcpu);
@@ -1043,10 +1105,6 @@ static void kvm_free_vmm_area(void)
}
}
-static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
-}
-
static int vti_init_vpd(struct kvm_vcpu *vcpu)
{
int i;
@@ -1165,7 +1223,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
regs->cr_iip = PALE_RESET_ENTRY;
/*Initialize itc offset for vcpus*/
- itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
+ itc_offset = 0UL - kvm_get_itc(vcpu);
for (i = 0; i < kvm->arch.online_vcpus; i++) {
v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
@@ -1237,6 +1295,7 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
local_irq_save(psr);
r = kvm_insert_vmm_mapping(vcpu);
+ local_irq_restore(psr);
if (r)
goto fail;
r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
@@ -1254,13 +1313,11 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
goto uninit;
kvm_purge_vmm_mapping(vcpu);
- local_irq_restore(psr);
return 0;
uninit:
kvm_vcpu_uninit(vcpu);
fail:
- local_irq_restore(psr);
return r;
}
@@ -1291,7 +1348,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->kvm = kvm;
cpu = get_cpu();
- vti_vcpu_load(vcpu, cpu);
r = vti_vcpu_setup(vcpu, id);
put_cpu();
@@ -1427,7 +1483,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
}
for (i = 0; i < 4; i++)
regs->insvc[i] = vcpu->arch.insvc[i];
- regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
+ regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
SAVE_REGS(xtp);
SAVE_REGS(metaphysical_rr0);
SAVE_REGS(metaphysical_rr4);
@@ -1574,6 +1630,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
void kvm_arch_flush_shadow(struct kvm *kvm)
{
+ kvm_flush_remote_tlbs(kvm);
}
long kvm_arch_dev_ioctl(struct file *filp,
@@ -1616,8 +1673,37 @@ out:
return 0;
}
+
+/*
+ * On SN2, the ITC isn't stable, so copy in fast path code to use the
+ * SN2 RTC, replacing the ITC-based default version.
+ */
+static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
+ struct module *module)
+{
+ unsigned long new_ar, new_ar_sn2;
+ unsigned long module_base;
+
+ if (!ia64_platform_is("sn2"))
+ return;
+
+ module_base = (unsigned long)module->module_core;
+
+ new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
+ new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;
+
+ printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
+ "as source\n");
+
+ /*
+ * Copy the SN2 version of mov_ar into place. They are both
+ * the same size, so 6 bundles is sufficient (6 * 0x10).
+ */
+ memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
+}
+
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
- struct module *module)
+ struct module *module)
{
unsigned long module_base;
unsigned long vmm_size;
@@ -1639,6 +1725,7 @@ static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
return -EFAULT;
memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
+ kvm_patch_vmm(vmm_info, module);
kvm_flush_icache(kvm_vmm_base, vmm_size);
/*Recalculate kvm_vmm_info based on new VMM*/
@@ -1792,38 +1879,24 @@ void kvm_arch_hardware_unsetup(void)
{
}
-static void vcpu_kick_intr(void *info)
-{
-#ifdef DEBUG
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
- printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu);
-#endif
-}
-
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
- int ipi_pcpu = vcpu->cpu;
- int cpu = get_cpu();
+ int me;
+ int cpu = vcpu->cpu;
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
- if (vcpu->guest_mode && cpu != ipi_pcpu)
- smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+ me = get_cpu();
+ if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
+ if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
+ smp_send_reschedule(cpu);
put_cpu();
}
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
{
-
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
- if (!test_and_set_bit(vec, &vpd->irr[0])) {
- vcpu->arch.irq_new_pending = 1;
- kvm_vcpu_kick(vcpu);
- return 1;
- }
- return 0;
+ return __apic_accept_irq(vcpu, irq->vector);
}
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
@@ -1836,20 +1909,18 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
return 0;
}
-struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
- unsigned long bitmap)
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
- struct kvm_vcpu *lvcpu = kvm->vcpus[0];
- int i;
-
- for (i = 1; i < kvm->arch.online_vcpus; i++) {
- if (!kvm->vcpus[i])
- continue;
- if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
- lvcpu = kvm->vcpus[i];
- }
+ return vcpu1->arch.xtp - vcpu2->arch.xtp;
+}
- return lvcpu;
+int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+ int short_hand, int dest, int dest_mode)
+{
+ struct kvm_lapic *target = vcpu->arch.apic;
+ return (dest_mode == 0) ?
+ kvm_apic_match_physical_addr(target, dest) :
+ kvm_apic_match_logical_addr(target, dest);
}
static int find_highest_bits(int *dat)
@@ -1888,6 +1959,12 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
return 0;
}
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+ /* do real check here */
+ return 1;
+}
+
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.timer_fired;
@@ -1918,6 +1995,7 @@ static int vcpu_reset(struct kvm_vcpu *vcpu)
long psr;
local_irq_save(psr);
r = kvm_insert_vmm_mapping(vcpu);
+ local_irq_restore(psr);
if (r)
goto fail;
@@ -1930,7 +2008,6 @@ static int vcpu_reset(struct kvm_vcpu *vcpu)
kvm_purge_vmm_mapping(vcpu);
r = 0;
fail:
- local_irq_restore(psr);
return r;
}
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index a8ae52e..e4b8231 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -21,6 +21,9 @@
#include <linux/kvm_host.h>
#include <linux/smp.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/clksupport.h>
+#include <asm/sn/shub_mmr.h>
#include "vti.h"
#include "misc.h"
@@ -188,12 +191,35 @@ static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
return result;
}
-static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
+/*
+ * On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2
+ * RTC is used instead. This function patches the ratios from SAL
+ * to match the RTC before providing them to the guest.
+ */
+static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result)
{
+ struct pal_freq_ratio *ratio;
+ unsigned long sal_freq, sal_drift, factor;
+
+ result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
+ &sal_freq, &sal_drift);
+ ratio = (struct pal_freq_ratio *)&result->v2;
+ factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) /
+ sn_rtc_cycles_per_second;
+
+ ratio->num = 3;
+ ratio->den = factor;
+}
+static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
+{
struct ia64_pal_retval result;
PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
+
+ if (vcpu->kvm->arch.is_sn2)
+ sn2_patch_itc_freq_ratios(&result);
+
return result;
}
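A worked example of the rounding in sn2_patch_itc_freq_ratios(), using made-up frequencies (the real SAL base clock and SN2 RTC rates are platform-specific):

	/*
	 * Hypothetical: sal_freq = 100 MHz, sn_rtc_cycles_per_second = 50 MHz.
	 *
	 *   factor = (100e6 * 3 + 50e6 / 2) / 50e6 = 325e6 / 50e6 = 6
	 *   ratio->num / ratio->den = 3 / 6
	 *
	 * The guest then derives its ITC rate as roughly
	 * sal_freq * num / den = 100 MHz * 3 / 6 = 50 MHz, i.e. the RTC
	 * rate that now backs the emulated ITC.
	 */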
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
index 6d6cbcb..ee541ce 100644
--- a/arch/ia64/kvm/lapic.h
+++ b/arch/ia64/kvm/lapic.h
@@ -20,6 +20,10 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
+int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+ int short_hand, int dest, int dest_mode);
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
+#define kvm_apic_present(x) (true)
#endif
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
index 32254ce..f793be3 100644
--- a/arch/ia64/kvm/optvfault.S
+++ b/arch/ia64/kvm/optvfault.S
@@ -11,6 +11,7 @@
#include <asm/asmmacro.h>
#include <asm/processor.h>
+#include <asm/kvm_host.h>
#include "vti.h"
#include "asm-offsets.h"
@@ -140,6 +141,35 @@ GLOBAL_ENTRY(kvm_asm_mov_from_ar)
;;
END(kvm_asm_mov_from_ar)
+/*
+ * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC
+ * clock as its source for emulating the ITC. This version will be
+ * copied on top of the original version if the host is determined to
+ * be an SN2.
+ */
+GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2)
+ add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
+ movl r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT))
+
+ add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
+ extr.u r17=r25,6,7
+ mov r24=b0
+ ;;
+ ld8 r18=[r18]
+ ld8 r19=[r19]
+ addl r20=@gprel(asm_mov_to_reg),gp
+ ;;
+ add r19=r19,r18
+ shladd r17=r17,4,r20
+ ;;
+ adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
+ st8 [r16] = r19
+ mov b0=r17
+ br.sptk.few b0
+ ;;
+END(kvm_asm_mov_from_ar_sn2)
+
+
// mov r1=rr[r3]
GLOBAL_ENTRY(kvm_asm_mov_from_rr)
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index b1dc809..a8f84da 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -652,20 +652,25 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
unsigned long isr, unsigned long iim)
{
struct kvm_vcpu *v = current_vcpu;
+ long psr;
if (ia64_psr(regs)->cpl == 0) {
/* Allow hypercalls only when cpl = 0. */
if (iim == DOMN_PAL_REQUEST) {
+ local_irq_save(psr);
set_pal_call_data(v);
vmm_transition(v);
get_pal_call_result(v);
vcpu_increment_iip(v);
+ local_irq_restore(psr);
return;
} else if (iim == DOMN_SAL_REQUEST) {
+ local_irq_save(psr);
set_sal_call_data(v);
vmm_transition(v);
get_sal_call_result(v);
vcpu_increment_iip(v);
+ local_irq_restore(psr);
return;
}
}
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index a18ee17..a2c6c15 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -788,13 +788,29 @@ void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
setfpreg(reg, val, regs); /* FIXME: handle NATs later*/
}
+/*
+ * The Altix RTC is mapped specially here for the vmm module
+ */
+#define SN_RTC_BASE (u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
+static long kvm_get_itc(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+ struct kvm *kvm = (struct kvm *)KVM_VM_BASE;
+
+ if (kvm->arch.is_sn2)
+ return (*SN_RTC_BASE);
+ else
+#endif
+ return ia64_getreg(_IA64_REG_AR_ITC);
+}
+
/************************************************************************
* lsapic timer
***********************************************************************/
u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
{
unsigned long guest_itc;
- guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
+ guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);
if (guest_itc >= VMX(vcpu, last_itc)) {
VMX(vcpu, last_itc) = guest_itc;
@@ -809,7 +825,7 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
struct kvm_vcpu *v;
struct kvm *kvm;
int i;
- long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
+ long itc_offset = val - kvm_get_itc(vcpu);
unsigned long vitv = VCPU(vcpu, itv);
kvm = (struct kvm *)KVM_VM_BASE;
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index 9eee5c0..f4b4c89 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -30,15 +30,19 @@ MODULE_AUTHOR("Intel");
MODULE_LICENSE("GPL");
extern char kvm_ia64_ivt;
+extern char kvm_asm_mov_from_ar;
+extern char kvm_asm_mov_from_ar_sn2;
extern fpswa_interface_t *vmm_fpswa_interface;
long vmm_sanity = 1;
struct kvm_vmm_info vmm_info = {
- .module = THIS_MODULE,
- .vmm_entry = vmm_entry,
- .tramp_entry = vmm_trampoline,
- .vmm_ivt = (unsigned long)&kvm_ia64_ivt,
+ .module = THIS_MODULE,
+ .vmm_entry = vmm_entry,
+ .tramp_entry = vmm_trampoline,
+ .vmm_ivt = (unsigned long)&kvm_ia64_ivt,
+ .patch_mov_ar = (unsigned long)&kvm_asm_mov_from_ar,
+ .patch_mov_ar_sn2 = (unsigned long)&kvm_asm_mov_from_ar_sn2,
};
static int __init kvm_vmm_init(void)
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index 3ef1a01..40920c6 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -95,7 +95,7 @@ GLOBAL_ENTRY(kvm_vmm_panic)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
(p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -249,7 +249,7 @@ ENTRY(kvm_break_fault)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15)ssm psr.i // restore psr.i
+ (p15)ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -439,7 +439,7 @@ kvm_dispatch_vexirq:
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
adds r3=8,r2 // set up second base pointer
;;
KVM_SAVE_REST
@@ -819,7 +819,7 @@ ENTRY(kvm_dtlb_miss_dispatch)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
;;
KVM_SAVE_REST
@@ -842,7 +842,7 @@ ENTRY(kvm_itlb_miss_dispatch)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -871,7 +871,7 @@ ENTRY(kvm_dispatch_reflection)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -898,7 +898,7 @@ ENTRY(kvm_dispatch_virtualization_fault)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
;;
KVM_SAVE_REST
@@ -920,7 +920,7 @@ ENTRY(kvm_dispatch_interrupt)
;;
srlz.i
;;
- //(p15) ssm psr.i
+ (p15) ssm psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -1333,7 +1333,7 @@ hostret = r24
;;
(p7) srlz.i
;;
-//(p6) ssm psr.i
+(p6) ssm psr.i
;;
mov rp=rpsave
mov ar.pfs=pfssave
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 2c2501f..4290a42 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -254,7 +254,8 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
"(p7) st8 [%2]=r9;;"
"ssm psr.ic;;"
"srlz.d;;"
- /* "ssm psr.i;;" Once interrupts in vmm open, need fix*/
+ "ssm psr.i;;"
+ "srlz.d;;"
: "=r"(ret) : "r"(iha), "r"(pte):"memory");
return ret;
diff --git a/arch/ia64/mm/extable.c b/arch/ia64/mm/extable.c
index 71c50dd..c99a41e 100644
--- a/arch/ia64/mm/extable.c
+++ b/arch/ia64/mm/extable.c
@@ -8,7 +8,7 @@
#include <linux/sort.h>
#include <asm/uaccess.h>
-#include <asm/module.h>
+#include <linux/module.h>
static int cmp_ex(const void *a, const void *b)
{
@@ -53,6 +53,32 @@ void sort_extable (struct exception_table_entry *start,
cmp_ex, swap_ex);
}
+static inline unsigned long ex_to_addr(const struct exception_table_entry *x)
+{
+ return (unsigned long)&x->addr + x->addr;
+}
+
+#ifdef CONFIG_MODULES
+/*
+ * Any entry referring to the module init will be at the beginning or
+ * the end.
+ */
+void trim_init_extable(struct module *m)
+{
+ /*trim the beginning*/
+ while (m->num_exentries &&
+ within_module_init(ex_to_addr(&m->extable[0]), m)) {
+ m->extable++;
+ m->num_exentries--;
+ }
+ /*trim the end*/
+ while (m->num_exentries &&
+ within_module_init(ex_to_addr(&m->extable[m->num_exentries-1]),
+ m))
+ m->num_exentries--;
+}
+#endif /* CONFIG_MODULES */
+
const struct exception_table_entry *
search_extable (const struct exception_table_entry *first,
const struct exception_table_entry *last,
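The trim above relies on ia64's self-relative exception-table entries: each entry stores an offset from its own location, decoded by ex_to_addr(). A hedged illustration with a hypothetical mirror of the layout:

	struct example_entry { int addr; };	/* self-relative offset */

	static unsigned long example_ex_to_addr(const struct example_entry *x)
	{
		/* target address = address of the field + stored offset */
		return (unsigned long)&x->addr + x->addr;
	}

Because a module's init and core sections are separate allocations and the table is sorted by address, entries pointing into init can only sit at the beginning or end of the sorted table, so trimming from both ends is sufficient.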
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 66fd705..764f26a 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -227,7 +227,7 @@ finish_up:
return new_irq_info;
}
-static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
+static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
nasid_t nasid;
@@ -239,6 +239,8 @@ static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
sn_irq_lh[irq], list)
(void)sn_retarget_vector(sn_irq_info, nasid, slice);
+
+ return 0;
}
#ifdef CONFIG_SMP
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 81e4289..fbbfb97 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -151,7 +151,7 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
}
#ifdef CONFIG_SMP
-static void sn_set_msi_irq_affinity(unsigned int irq,
+static int sn_set_msi_irq_affinity(unsigned int irq,
const struct cpumask *cpu_mask)
{
struct msi_msg msg;
@@ -168,7 +168,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
cpu = cpumask_first(cpu_mask);
sn_irq_info = sn_msi_info[irq].sn_irq_info;
if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
- return;
+ return -1;
/*
* Release XIO resources for the old MSI PCI address
@@ -189,7 +189,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
sn_msi_info[irq].sn_irq_info = new_irq_info;
if (new_irq_info == NULL)
- return;
+ return -1;
/*
* Map the xio address into bus space
@@ -206,6 +206,8 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
write_msi_msg(irq, &msg);
cpumask_copy(irq_desc[irq].affinity, cpu_mask);
+
+ return 0;
}
#endif /* CONFIG_SMP */
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index d876423..98b6849 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -90,7 +90,8 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
*/
node = pcibus_to_node(pdev->bus);
if (likely(node >=0)) {
- struct page *p = alloc_pages_node(node, flags, get_order(size));
+ struct page *p = alloc_pages_exact_node(node,
+ flags, get_order(size));
if (likely(p))
cpuaddr = page_address(p);