Diffstat (limited to 'virt')
-rw-r--r--   virt/kvm/arm/arch_timer.c      |  35
-rw-r--r--   virt/kvm/arm/vgic/vgic-init.c  |  31
-rw-r--r--   virt/kvm/irqchip.c             |   7
-rw-r--r--   virt/kvm/kvm_main.c            | 108
4 files changed, 100 insertions, 81 deletions
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index e2d5b6f..4fde8c7 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -405,26 +405,17 @@ u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
 	return (u64)-1;
 }
 
-static int kvm_timer_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *cpu)
+static int kvm_timer_starting_cpu(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		kvm_timer_init_interrupt(NULL);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		disable_percpu_irq(host_vtimer_irq);
-		break;
-	}
-
-	return NOTIFY_OK;
+	kvm_timer_init_interrupt(NULL);
+	return 0;
 }
 
-static struct notifier_block kvm_timer_cpu_nb = {
-	.notifier_call = kvm_timer_cpu_notify,
-};
+static int kvm_timer_dying_cpu(unsigned int cpu)
+{
+	disable_percpu_irq(host_vtimer_irq);
+	return 0;
+}
 
 int kvm_timer_hyp_init(void)
 {
@@ -449,12 +440,6 @@ int kvm_timer_hyp_init(void)
 		goto out;
 	}
 
-	err = __register_cpu_notifier(&kvm_timer_cpu_nb);
-	if (err) {
-		kvm_err("Cannot register timer CPU notifier\n");
-		goto out_free;
-	}
-
 	wqueue = create_singlethread_workqueue("kvm_arch_timer");
 	if (!wqueue) {
 		err = -ENOMEM;
@@ -462,8 +447,10 @@ int kvm_timer_hyp_init(void)
 	}
 
 	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
-	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
 
+	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
+			  "AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu,
+			  kvm_timer_dying_cpu);
 	goto out;
 out_free:
 	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 1aba785..fb4b0a7 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -364,32 +364,19 @@ out:
 
 /* GENERIC PROBE */
 
-static void vgic_init_maintenance_interrupt(void *info)
+static int vgic_init_cpu_starting(unsigned int cpu)
 {
 	enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
+	return 0;
 }
 
-static int vgic_cpu_notify(struct notifier_block *self,
-			   unsigned long action, void *cpu)
-{
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		vgic_init_maintenance_interrupt(NULL);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		disable_percpu_irq(kvm_vgic_global_state.maint_irq);
-		break;
-	}
-	return NOTIFY_OK;
+static int vgic_init_cpu_dying(unsigned int cpu)
+{
+	disable_percpu_irq(kvm_vgic_global_state.maint_irq);
+	return 0;
 }
 
-static struct notifier_block vgic_cpu_nb = {
-	.notifier_call = vgic_cpu_notify,
-};
-
 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
 {
 	/*
@@ -445,14 +432,14 @@ int kvm_vgic_hyp_init(void)
 		return ret;
 	}
 
-	ret = __register_cpu_notifier(&vgic_cpu_nb);
+	ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
+				"AP_KVM_ARM_VGIC_INIT_STARTING",
+				vgic_init_cpu_starting, vgic_init_cpu_dying);
 	if (ret) {
 		kvm_err("Cannot register vgic CPU notifier\n");
 		goto out_free_irq;
 	}
 
-	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
 	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
 	return 0;
 
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index c620219..3bcc999 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -137,7 +137,8 @@ void kvm_free_irq_routing(struct kvm *kvm)
 	free_irq_routing_table(rt);
 }
 
-static int setup_routing_entry(struct kvm_irq_routing_table *rt,
+static int setup_routing_entry(struct kvm *kvm,
+			       struct kvm_irq_routing_table *rt,
 			       struct kvm_kernel_irq_routing_entry *e,
 			       const struct kvm_irq_routing_entry *ue)
 {
@@ -156,7 +157,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
 
 	e->gsi = ue->gsi;
 	e->type = ue->type;
-	r = kvm_set_routing_entry(e, ue);
+	r = kvm_set_routing_entry(kvm, e, ue);
 	if (r)
 		goto out;
 	if (e->type == KVM_IRQ_ROUTING_IRQCHIP)
@@ -218,7 +219,7 @@ int kvm_set_irq_routing(struct kvm *kvm,
 				goto free_entry;
 			break;
 		}
-		r = setup_routing_entry(new, e, ue);
+		r = setup_routing_entry(kvm, new, e, ue);
 		if (r)
 			goto free_entry;
 		++ue;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bd2eb92..cc081cc 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -148,6 +148,7 @@ int vcpu_load(struct kvm_vcpu *vcpu)
 	put_cpu();
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vcpu_load);
 
 void vcpu_put(struct kvm_vcpu *vcpu)
 {
@@ -157,6 +158,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
 	preempt_enable();
 	mutex_unlock(&vcpu->mutex);
 }
+EXPORT_SYMBOL_GPL(vcpu_put);
 
 static void ack_flush(void *_completed)
 {
@@ -1442,6 +1444,52 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
 	return true;
 }
 
+static int hva_to_pfn_remapped(struct vm_area_struct *vma,
+			       unsigned long addr, bool *async,
+			       bool write_fault, kvm_pfn_t *p_pfn)
+{
+	unsigned long pfn;
+	int r;
+
+	r = follow_pfn(vma, addr, &pfn);
+	if (r) {
+		/*
+		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
+		 * not call the fault handler, so do it here.
+		 */
+		bool unlocked = false;
+		r = fixup_user_fault(current, current->mm, addr,
+				     (write_fault ? FAULT_FLAG_WRITE : 0),
+				     &unlocked);
+		if (unlocked)
+			return -EAGAIN;
+		if (r)
+			return r;
+
+		r = follow_pfn(vma, addr, &pfn);
+		if (r)
+			return r;
+
+	}
+
+
+	/*
+	 * Get a reference here because callers of *hva_to_pfn* and
+	 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
+	 * returned pfn.  This is only needed if the VMA has VM_MIXEDMAP
+	 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
+	 * simply do nothing for reserved pfns.
+	 *
+	 * Whoever called remap_pfn_range is also going to call e.g.
+	 * unmap_mapping_range before the underlying pages are freed,
+	 * causing a call to our MMU notifier.
+	 */
+	kvm_get_pfn(pfn);
+
+	*p_pfn = pfn;
+	return 0;
+}
+
 /*
  * Pin guest page in memory and return its pfn.
  * @addr: host virtual address which maps memory to the guest
@@ -1461,7 +1509,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 {
 	struct vm_area_struct *vma;
 	kvm_pfn_t pfn = 0;
-	int npages;
+	int npages, r;
 
 	/* we can do it either atomically or asynchronously, not both */
 	BUG_ON(atomic && async);
@@ -1483,14 +1531,17 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 		goto exit;
 	}
 
+retry:
 	vma = find_vma_intersection(current->mm, addr, addr + 1);
 
 	if (vma == NULL)
 		pfn = KVM_PFN_ERR_FAULT;
-	else if ((vma->vm_flags & VM_PFNMAP)) {
-		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
-			vma->vm_pgoff;
-		BUG_ON(!kvm_is_reserved_pfn(pfn));
+	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
+		r = hva_to_pfn_remapped(vma, addr, async, write_fault, &pfn);
+		if (r == -EAGAIN)
+			goto retry;
+		if (r < 0)
+			pfn = KVM_PFN_ERR_FAULT;
 	} else {
 		if (async && vma_is_valid(vma, write_fault))
 			*async = true;
@@ -2948,7 +2999,7 @@ static long kvm_vm_ioctl(struct file *filp,
 		if (copy_from_user(&routing, argp, sizeof(routing)))
 			goto out;
 		r = -EINVAL;
-		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
+		if (routing.nr > KVM_MAX_IRQ_ROUTES)
 			goto out;
 		if (routing.flags)
 			goto out;
@@ -3055,6 +3106,7 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
 {
 	int r;
 	struct kvm *kvm;
+	struct file *file;
 
 	kvm = kvm_create_vm(type);
 	if (IS_ERR(kvm))
@@ -3066,17 +3118,25 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
 		return r;
 	}
 #endif
-	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC);
+	r = get_unused_fd_flags(O_CLOEXEC);
 	if (r < 0) {
 		kvm_put_kvm(kvm);
 		return r;
 	}
+	file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
+	if (IS_ERR(file)) {
+		put_unused_fd(r);
+		kvm_put_kvm(kvm);
+		return PTR_ERR(file);
+	}
 	if (kvm_create_vm_debugfs(kvm, r) < 0) {
-		kvm_put_kvm(kvm);
+		put_unused_fd(r);
+		fput(file);
 		return -ENOMEM;
 	}
+	fd_install(r, file);
 
 	return r;
 }
@@ -3151,12 +3211,13 @@ static void hardware_enable_nolock(void *junk)
 	}
 }
 
-static void hardware_enable(void)
+static int kvm_starting_cpu(unsigned int cpu)
 {
 	raw_spin_lock(&kvm_count_lock);
 	if (kvm_usage_count)
 		hardware_enable_nolock(NULL);
 	raw_spin_unlock(&kvm_count_lock);
+	return 0;
 }
 
 static void hardware_disable_nolock(void *junk)
@@ -3169,12 +3230,13 @@ static void hardware_disable_nolock(void *junk)
 	kvm_arch_hardware_disable();
 }
 
-static void hardware_disable(void)
+static int kvm_dying_cpu(unsigned int cpu)
 {
 	raw_spin_lock(&kvm_count_lock);
 	if (kvm_usage_count)
 		hardware_disable_nolock(NULL);
 	raw_spin_unlock(&kvm_count_lock);
+	return 0;
 }
 
 static void hardware_disable_all_nolock(void)
@@ -3215,21 +3277,6 @@ static int hardware_enable_all(void)
 	return r;
 }
 
-static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
-			   void *v)
-{
-	val &= ~CPU_TASKS_FROZEN;
-	switch (val) {
-	case CPU_DYING:
-		hardware_disable();
-		break;
-	case CPU_STARTING:
-		hardware_enable();
-		break;
-	}
-	return NOTIFY_OK;
-}
-
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
 		      void *v)
 {
@@ -3520,10 +3567,6 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
 
-static struct notifier_block kvm_cpu_notifier = {
-	.notifier_call = kvm_cpu_hotplug,
-};
-
 static int kvm_debugfs_open(struct inode *inode, struct file *file,
 			   int (*get)(void *, u64 *), int (*set)(void *, u64),
 			   const char *fmt)
 {
@@ -3774,7 +3817,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 			goto out_free_1;
 	}
 
-	r = register_cpu_notifier(&kvm_cpu_notifier);
+	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "AP_KVM_STARTING",
+				      kvm_starting_cpu, kvm_dying_cpu);
 	if (r)
 		goto out_free_2;
 	register_reboot_notifier(&kvm_reboot_notifier);
@@ -3828,7 +3872,7 @@ out_free:
 	kmem_cache_destroy(kvm_vcpu_cache);
 out_free_3:
 	unregister_reboot_notifier(&kvm_reboot_notifier);
-	unregister_cpu_notifier(&kvm_cpu_notifier);
+	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
 out_free_2:
 out_free_1:
 	kvm_arch_hardware_unsetup();
@@ -3851,7 +3895,7 @@ void kvm_exit(void)
 	kvm_async_pf_deinit();
 	unregister_syscore_ops(&kvm_syscore_ops);
 	unregister_reboot_notifier(&kvm_reboot_notifier);
-	unregister_cpu_notifier(&kvm_cpu_notifier);
+	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
 	on_each_cpu(hardware_disable_nolock, NULL, 1);
 	kvm_arch_hardware_unsetup();
 	kvm_arch_exit();
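For reference, the conversions above all follow the same hotplug-state pattern: a pair of plain startup/teardown callbacks registered once with cpuhp_setup_state(), instead of a notifier_block plus a manual on_each_cpu() pass. The sketch below shows that pattern in isolation as a toy module; the "foo" names and the use of a dynamic CPUHP_AP_ONLINE_DYN slot are illustrative assumptions, not code from this series (the KVM patches use fixed CPUHP_AP_KVM_*_STARTING states, whose callbacks run on the affected CPU itself during bring-up and teardown).

/*
 * Minimal sketch of the cpuhp_setup_state() pattern, under the assumptions
 * stated above: "foo" is a hypothetical per-CPU resource, and a dynamic
 * online state is requested instead of a fixed CPUHP_AP_*_STARTING state.
 */
#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state foo_hp_state;

/* Called on each CPU as it comes online (and on all online CPUs at setup). */
static int foo_starting_cpu(unsigned int cpu)
{
	/* enable the per-CPU resource here */
	return 0;
}

/* Called on each CPU as it goes offline (and on all online CPUs at removal). */
static int foo_dying_cpu(unsigned int cpu)
{
	/* quiesce the per-CPU resource here */
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN asks the core for a dynamic state slot and
	 * returns the allocated state number on success.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
				foo_starting_cpu, foo_dying_cpu);
	if (ret < 0)
		return ret;
	foo_hp_state = ret;
	return 0;
}

static void __exit foo_exit(void)
{
	cpuhp_remove_state(foo_hp_state);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

Note that cpuhp_setup_state() also invokes the startup callback on every CPU that is already online, which is why the explicit on_each_cpu(kvm_timer_init_interrupt, ...) and on_each_cpu(vgic_init_maintenance_interrupt, ...) calls could simply be dropped, while kvm_init() uses the _nocalls() variant because hardware enablement is deferred until the first VM is created.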