From e5fd91f1ef340da553f7a79da9540c3db711c937 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Tue, 8 Sep 2015 01:01:14 -0300
Subject: Linux-libre 4.2-gnu

---
 virt/kvm/arm/vgic-v3-emul.c |  56 +++++-
 virt/kvm/arm/vgic.c         |   5 -
 virt/kvm/async_pf.h         |   4 +-
 virt/kvm/coalesced_mmio.h   |   4 +-
 virt/kvm/irqchip.c          |  41 ++++-
 virt/kvm/kvm_main.c         | 435 +++++++++++++++++++++++++++++---------------
 virt/kvm/vfio.c             |   5 +
 7 files changed, 382 insertions(+), 168 deletions(-)

diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
index e9c3a7a83..e661e7fb9 100644
--- a/virt/kvm/arm/vgic-v3-emul.c
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -76,8 +76,6 @@ static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
 	vgic_reg_access(mmio, &reg, offset,
 			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
 	if (mmio->is_write) {
-		if (reg & GICD_CTLR_ENABLE_SS_G0)
-			kvm_info("guest tried to enable unsupported Group0 interrupts\n");
 		vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
 		vgic_update_state(vcpu->kvm);
 		return true;
@@ -173,6 +171,32 @@ static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
 	return false;
 }
 
+static bool handle_mmio_set_active_reg_dist(struct kvm_vcpu *vcpu,
+					    struct kvm_exit_mmio *mmio,
+					    phys_addr_t offset)
+{
+	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+		return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
+						  vcpu->vcpu_id);
+
+	vgic_reg_access(mmio, NULL, offset,
+			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+	return false;
+}
+
+static bool handle_mmio_clear_active_reg_dist(struct kvm_vcpu *vcpu,
+					      struct kvm_exit_mmio *mmio,
+					      phys_addr_t offset)
+{
+	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+		return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
+						    vcpu->vcpu_id);
+
+	vgic_reg_access(mmio, NULL, offset,
+			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+	return false;
+}
+
 static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
 					  struct kvm_exit_mmio *mmio,
 					  phys_addr_t offset)
@@ -428,13 +452,13 @@ static const struct vgic_io_range vgic_v3_dist_ranges[] = {
 		.base = GICD_ISACTIVER,
 		.len = 0x80,
 		.bits_per_irq = 1,
-		.handle_mmio = handle_mmio_raz_wi,
+		.handle_mmio = handle_mmio_set_active_reg_dist,
 	},
 	{
 		.base = GICD_ICACTIVER,
 		.len = 0x80,
 		.bits_per_irq = 1,
-		.handle_mmio = handle_mmio_raz_wi,
+		.handle_mmio = handle_mmio_clear_active_reg_dist,
 	},
 	{
 		.base = GICD_IPRIORITYR,
@@ -561,6 +585,26 @@ static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
 				      ACCESS_WRITE_CLEARBIT);
 }
 
+static bool handle_mmio_set_active_reg_redist(struct kvm_vcpu *vcpu,
+					      struct kvm_exit_mmio *mmio,
+					      phys_addr_t offset)
+{
+	struct kvm_vcpu *redist_vcpu = mmio->private;
+
+	return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
+					  redist_vcpu->vcpu_id);
+}
+
+static bool handle_mmio_clear_active_reg_redist(struct kvm_vcpu *vcpu,
+						struct kvm_exit_mmio *mmio,
+						phys_addr_t offset)
+{
+	struct kvm_vcpu *redist_vcpu = mmio->private;
+
+	return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
+					    redist_vcpu->vcpu_id);
+}
+
 static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
 					       struct kvm_exit_mmio *mmio,
 					       phys_addr_t offset)
@@ -674,13 +718,13 @@ static const struct vgic_io_range vgic_redist_ranges[] = {
 		.base = SGI_base(GICR_ISACTIVER0),
 		.len = 0x04,
 		.bits_per_irq = 1,
-		.handle_mmio = handle_mmio_raz_wi,
+		.handle_mmio = handle_mmio_set_active_reg_redist,
 	},
 	{
 		.base = SGI_base(GICR_ICACTIVER0),
 		.len = 0x04,
 		.bits_per_irq = 1,
-		.handle_mmio = handle_mmio_raz_wi,
+		.handle_mmio = handle_mmio_clear_active_reg_redist,
 	},
 	{
 		.base = SGI_base(GICR_IPRIORITYR0),
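Note on the vgic-v3-emul.c hunks: GICD_ISACTIVER/GICD_ICACTIVER are declared with bits_per_irq = 1, so one byte of register space covers eight interrupts, and byte offsets below VGIC_NR_PRIVATE_IRQS / 8 address the per-vCPU SGIs/PPIs that the distributor handlers keep RAZ/WI. A stand-alone sketch of that offset arithmetic (the value 32 for VGIC_NR_PRIVATE_IRQS, i.e. 16 SGIs plus 16 PPIs, is an assumption, not taken from this patch):

    /* Illustrative only: which IRQs a 1-bit-per-IRQ register byte covers. */
    #include <stdio.h>

    #define VGIC_NR_PRIVATE_IRQS 32	/* assumed: 16 SGIs + 16 PPIs */

    int main(void)
    {
    	unsigned int offset = 4;		/* byte offset into ISACTIVER space */
    	unsigned int first_irq = offset * 8;	/* 8 IRQs per byte */

    	printf("offset %u -> IRQs %u..%u (%s)\n", offset, first_irq,
    	       first_irq + 7,
    	       offset < VGIC_NR_PRIVATE_IRQS / 8 ? "private: RAZ/WI here"
    						 : "shared: emulated");
    	return 0;
    }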
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 950064a09..bc40137a0 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -26,8 +26,6 @@
 #include
 #include
 
-#include
-
 #include
 #include
 #include
@@ -2128,9 +2126,6 @@ int kvm_vgic_hyp_init(void)
 		goto out_free_irq;
 	}
 
-	/* Callback into for arch code for setup */
-	vgic_arch_setup(vgic);
-
 	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
 
 	return 0;

diff --git a/virt/kvm/async_pf.h b/virt/kvm/async_pf.h
index e7ef6447c..ec4cfa278 100644
--- a/virt/kvm/async_pf.h
+++ b/virt/kvm/async_pf.h
@@ -29,8 +29,8 @@ void kvm_async_pf_deinit(void);
 void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu);
 #else
 #define kvm_async_pf_init() (0)
-#define kvm_async_pf_deinit() do{}while(0)
-#define kvm_async_pf_vcpu_init(C) do{}while(0)
+#define kvm_async_pf_deinit() do {} while (0)
+#define kvm_async_pf_vcpu_init(C) do {} while (0)
 #endif
 
 #endif

diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index b280c2044..5cbf190d2 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -24,9 +24,9 @@ struct kvm_coalesced_mmio_dev {
 int kvm_coalesced_mmio_init(struct kvm *kvm);
 void kvm_coalesced_mmio_free(struct kvm *kvm);
 int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
-					struct kvm_coalesced_mmio_zone *zone);
+					 struct kvm_coalesced_mmio_zone *zone);
 int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
-					struct kvm_coalesced_mmio_zone *zone);
+					   struct kvm_coalesced_mmio_zone *zone);
 
 #else

diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index 1d56a901e..21c14244f 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -33,7 +33,6 @@
 
 struct kvm_irq_routing_table {
 	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
-	struct kvm_kernel_irq_routing_entry *rt_entries;
 	u32 nr_rt_entries;
 	/*
	 * Array indexed by gsi. Each entry contains list of irq chips
@@ -118,11 +117,32 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
 	return ret;
 }
 
+static void free_irq_routing_table(struct kvm_irq_routing_table *rt)
+{
+	int i;
+
+	if (!rt)
+		return;
+
+	for (i = 0; i < rt->nr_rt_entries; ++i) {
+		struct kvm_kernel_irq_routing_entry *e;
+		struct hlist_node *n;
+
+		hlist_for_each_entry_safe(e, n, &rt->map[i], link) {
+			hlist_del(&e->link);
+			kfree(e);
+		}
+	}
+
+	kfree(rt);
+}
+
 void kvm_free_irq_routing(struct kvm *kvm)
 {
 	/* Called only during vm destruction. Nobody can use the pointer
 	   at this stage */
-	kfree(kvm->irq_routing);
+	struct kvm_irq_routing_table *rt = rcu_access_pointer(kvm->irq_routing);
+
+	free_irq_routing_table(rt);
 }
 
 static int setup_routing_entry(struct kvm_irq_routing_table *rt,
@@ -173,25 +193,29 @@ int kvm_set_irq_routing(struct kvm *kvm,
 
 	nr_rt_entries += 1;
 
-	new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head))
-		      + (nr * sizeof(struct kvm_kernel_irq_routing_entry)),
+	new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head)),
 		      GFP_KERNEL);
 
 	if (!new)
 		return -ENOMEM;
 
-	new->rt_entries = (void *)&new->map[nr_rt_entries];
-
 	new->nr_rt_entries = nr_rt_entries;
 	for (i = 0; i < KVM_NR_IRQCHIPS; i++)
 		for (j = 0; j < KVM_IRQCHIP_NUM_PINS; j++)
 			new->chip[i][j] = -1;
 
 	for (i = 0; i < nr; ++i) {
+		struct kvm_kernel_irq_routing_entry *e;
+
+		r = -ENOMEM;
+		e = kzalloc(sizeof(*e), GFP_KERNEL);
+		if (!e)
+			goto out;
+
 		r = -EINVAL;
 		if (ue->flags)
 			goto out;
-		r = setup_routing_entry(new, &new->rt_entries[i], ue);
+		r = setup_routing_entry(new, e, ue);
 		if (r)
 			goto out;
 		++ue;
@@ -209,6 +233,7 @@ int kvm_set_irq_routing(struct kvm *kvm,
 	r = 0;
 
 out:
-	kfree(new);
+	free_irq_routing_table(new);
+
 	return r;
 }
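With the flat rt_entries array gone, each routing entry is now an individually kzalloc()ed node linked into a per-GSI hlist, so teardown has to unlink and free every node before freeing the table itself, which is exactly what free_irq_routing_table() above does. A reduced user-space analogue of that unlink-and-free walk (illustrative types, not the kernel's):

    #include <stdlib.h>

    struct entry { struct entry *next; };
    struct table { unsigned int nr; struct entry *map[16]; };

    static void free_table(struct table *t)
    {
    	if (!t)
    		return;
    	for (unsigned int i = 0; i < t->nr; i++) {
    		struct entry *e = t->map[i];
    		while (e) {
    			struct entry *n = e->next;	/* save next before freeing */
    			free(e);
    			e = n;
    		}
    	}
    	free(t);
    }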
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 90977418a..8b8a44453 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -103,8 +103,7 @@ static void hardware_disable_all(void);
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
 
 static void kvm_release_pfn_dirty(pfn_t pfn);
-static void mark_page_dirty_in_slot(struct kvm *kvm,
-				    struct kvm_memory_slot *memslot, gfn_t gfn);
+static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
 
 __visible bool kvm_rebooting;
 EXPORT_SYMBOL_GPL(kvm_rebooting);
@@ -440,13 +439,60 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
 
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 
-static void kvm_init_memslots_id(struct kvm *kvm)
+static struct kvm_memslots *kvm_alloc_memslots(void)
 {
 	int i;
-	struct kvm_memslots *slots = kvm->memslots;
+	struct kvm_memslots *slots;
 
+	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
+	if (!slots)
+		return NULL;
+
+	/*
+	 * Init kvm generation close to the maximum to easily test the
+	 * code of handling generation number wrap-around.
+	 */
+	slots->generation = -150;
 	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
 		slots->id_to_index[i] = slots->memslots[i].id = i;
+
+	return slots;
+}
+
+static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+	if (!memslot->dirty_bitmap)
+		return;
+
+	kvfree(memslot->dirty_bitmap);
+	memslot->dirty_bitmap = NULL;
+}
+
+/*
+ * Free any memory in @free but not in @dont.
+ */
+static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+			     struct kvm_memory_slot *dont)
+{
+	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
+		kvm_destroy_dirty_bitmap(free);
+
+	kvm_arch_free_memslot(kvm, free, dont);
+
+	free->npages = 0;
+}
+
+static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
+{
+	struct kvm_memory_slot *memslot;
+
+	if (!slots)
+		return;
+
+	kvm_for_each_memslot(memslot, slots)
+		kvm_free_memslot(kvm, memslot, NULL);
+
+	kvfree(slots);
 }
 
 static struct kvm *kvm_create_vm(unsigned long type)
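Initializing slots->generation to -150 parks the (unsigned) counter a few increments below its wrap point, so the wrap-around handling runs early in every VM's life rather than only after billions of slot updates. A stand-alone demonstration of the trick (using a 32-bit counter here; the width of the kernel field may differ):

    #include <stdio.h>

    int main(void)
    {
    	unsigned int gen = -150;	/* same idea as slots->generation */
    	int i;

    	for (i = 0; i < 151; i++)
    		gen++;
    	printf("%u\n", gen);		/* prints 1: wrapped through 0 */
    	return 0;
    }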
@@ -472,17 +518,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
 
 	r = -ENOMEM;
-	kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots));
-	if (!kvm->memslots)
-		goto out_err_no_srcu;
-
-	/*
-	 * Init kvm generation close to the maximum to easily test the
-	 * code of handling generation number wrap-around.
-	 */
-	kvm->memslots->generation = -150;
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		kvm->memslots[i] = kvm_alloc_memslots();
+		if (!kvm->memslots[i])
+			goto out_err_no_srcu;
+	}
 
-	kvm_init_memslots_id(kvm);
 	if (init_srcu_struct(&kvm->srcu))
 		goto out_err_no_srcu;
 	if (init_srcu_struct(&kvm->irq_srcu))
@@ -512,6 +553,8 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	list_add(&kvm->vm_list, &vm_list);
 	spin_unlock(&kvm_lock);
 
+	preempt_notifier_inc();
+
 	return kvm;
 
 out_err:
@@ -523,7 +566,8 @@ out_err_no_srcu:
 out_err_no_disable:
 	for (i = 0; i < KVM_NR_BUSES; i++)
 		kfree(kvm->buses[i]);
-	kvfree(kvm->memslots);
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+		kvm_free_memslots(kvm, kvm->memslots[i]);
 	kvm_arch_free_vm(kvm);
 	return ERR_PTR(r);
 }
@@ -540,40 +584,6 @@ void *kvm_kvzalloc(unsigned long size)
 	return kzalloc(size, GFP_KERNEL);
 }
 
-static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
-{
-	if (!memslot->dirty_bitmap)
-		return;
-
-	kvfree(memslot->dirty_bitmap);
-	memslot->dirty_bitmap = NULL;
-}
-
-/*
- * Free any memory in @free but not in @dont.
- */
-static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
-				  struct kvm_memory_slot *dont)
-{
-	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
-		kvm_destroy_dirty_bitmap(free);
-
-	kvm_arch_free_memslot(kvm, free, dont);
-
-	free->npages = 0;
-}
-
-static void kvm_free_physmem(struct kvm *kvm)
-{
-	struct kvm_memslots *slots = kvm->memslots;
-	struct kvm_memory_slot *memslot;
-
-	kvm_for_each_memslot(memslot, slots)
-		kvm_free_physmem_slot(kvm, memslot, NULL);
-
-	kvfree(kvm->memslots);
-}
-
 static void kvm_destroy_devices(struct kvm *kvm)
 {
 	struct list_head *node, *tmp;
@@ -607,10 +617,12 @@ static void kvm_destroy_vm(struct kvm *kvm)
 #endif
 	kvm_arch_destroy_vm(kvm);
 	kvm_destroy_devices(kvm);
-	kvm_free_physmem(kvm);
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+		kvm_free_memslots(kvm, kvm->memslots[i]);
 	cleanup_srcu_struct(&kvm->irq_srcu);
 	cleanup_srcu_struct(&kvm->srcu);
 	kvm_arch_free_vm(kvm);
+	preempt_notifier_dec();
 	hardware_disable_all();
 	mmdrop(mm);
 }
@@ -670,8 +682,6 @@ static void update_memslots(struct kvm_memslots *slots,
 	WARN_ON(mslots[i].id != id);
 	if (!new->npages) {
 		WARN_ON(!mslots[i].npages);
-		new->base_gfn = 0;
-		new->flags = 0;
 		if (mslots[i].npages)
 			slots->used_slots--;
 	} else {
@@ -711,7 +721,7 @@ static void update_memslots(struct kvm_memslots *slots,
 	slots->id_to_index[mslots[i].id] = i;
 }
 
-static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
+static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
 {
 	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
 
@@ -726,9 +736,9 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
 }
 
 static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
-		struct kvm_memslots *slots)
+		int as_id, struct kvm_memslots *slots)
 {
-	struct kvm_memslots *old_memslots = kvm->memslots;
+	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
 
 	/*
	 * Set the low bit in the generation, which disables SPTE caching
@@ -737,7 +747,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 	WARN_ON(old_memslots->generation & 1);
 	slots->generation = old_memslots->generation + 1;
 
-	rcu_assign_pointer(kvm->memslots, slots);
+	rcu_assign_pointer(kvm->memslots[as_id], slots);
 	synchronize_srcu_expedited(&kvm->srcu);
 
 	/*
@@ -747,7 +757,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 	 */
 	slots->generation++;
 
-	kvm_arch_memslots_updated(kvm);
+	kvm_arch_memslots_updated(kvm, slots);
 
 	return old_memslots;
 }
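The generation protocol above keeps the counter odd for as long as a memslot update is visible but incomplete: the low bit set disables SPTE caching keyed on the generation, the new array is published with rcu_assign_pointer(), SRCU readers are drained, and only then is the generation bumped back to even. A toy model of that odd-means-in-progress convention (illustrative, compressing the SRCU step into a comment):

    #include <assert.h>

    static unsigned int gen;	/* models slots->generation */

    static int cacheable(unsigned int g)
    {
    	return (g & 1) == 0;	/* odd = update in progress, don't cache */
    }

    static void install(void)
    {
    	gen += 1;		/* odd: new slots visible, not yet stable */
    	assert(!cacheable(gen));
    	/* ... rcu_assign_pointer() + synchronize_srcu_expedited() ... */
    	gen += 1;		/* even again: safe to cache */
    	assert(cacheable(gen));
    }

    int main(void) { install(); return 0; }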
@@ -761,7 +771,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
  * Must be called holding kvm->slots_lock for write.
  */
 int __kvm_set_memory_region(struct kvm *kvm,
-			    struct kvm_userspace_memory_region *mem)
+			    const struct kvm_userspace_memory_region *mem)
 {
 	int r;
 	gfn_t base_gfn;
@@ -769,6 +779,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	struct kvm_memory_slot *slot;
 	struct kvm_memory_slot old, new;
 	struct kvm_memslots *slots = NULL, *old_memslots;
+	int as_id, id;
 	enum kvm_mr_change change;
 
 	r = check_memory_region_flags(mem);
@@ -776,36 +787,36 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		goto out;
 
 	r = -EINVAL;
+	as_id = mem->slot >> 16;
+	id = (u16)mem->slot;
+
 	/* General sanity checks */
 	if (mem->memory_size & (PAGE_SIZE - 1))
 		goto out;
 	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
 		goto out;
 	/* We can read the guest memory with __xxx_user() later on. */
-	if ((mem->slot < KVM_USER_MEM_SLOTS) &&
+	if ((id < KVM_USER_MEM_SLOTS) &&
 	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
 	     !access_ok(VERIFY_WRITE,
			(void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size)))
 		goto out;
-	if (mem->slot >= KVM_MEM_SLOTS_NUM)
+	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
 		goto out;
 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
 		goto out;
 
-	slot = id_to_memslot(kvm->memslots, mem->slot);
+	slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
 	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
 	npages = mem->memory_size >> PAGE_SHIFT;
 
 	if (npages > KVM_MEM_MAX_NR_PAGES)
 		goto out;
 
-	if (!npages)
-		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
-
 	new = old = *slot;
 
-	new.id = mem->slot;
+	new.id = id;
 	new.base_gfn = base_gfn;
 	new.npages = npages;
 	new.flags = mem->flags;
@@ -828,17 +839,21 @@ int __kvm_set_memory_region(struct kvm *kvm,
 				goto out;
 		}
 	}
-	} else if (old.npages) {
+	} else {
+		if (!old.npages)
+			goto out;
+
 		change = KVM_MR_DELETE;
-	} else /* Modify a non-existent slot: disallowed. */
-		goto out;
+		new.base_gfn = 0;
+		new.flags = 0;
+	}
 
 	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
 		/* Check for overlaps */
 		r = -EEXIST;
-		kvm_for_each_memslot(slot, kvm->memslots) {
+		kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
 			if ((slot->id >= KVM_USER_MEM_SLOTS) ||
-			    (slot->id == mem->slot))
+			    (slot->id == id))
 				continue;
 			if (!((base_gfn + npages <= slot->base_gfn) ||
 			      (base_gfn >= slot->base_gfn + slot->npages)))
@@ -867,13 +882,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
 		if (!slots)
 			goto out_free;
-		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+		memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
 
 	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
-		slot = id_to_memslot(slots, mem->slot);
+		slot = id_to_memslot(slots, id);
 		slot->flags |= KVM_MEMSLOT_INVALID;
 
-		old_memslots = install_new_memslots(kvm, slots);
+		old_memslots = install_new_memslots(kvm, as_id, slots);
 
 		/* slot was deleted or moved, clear iommu mapping */
 		kvm_iommu_unmap_pages(kvm, &old);
@@ -898,18 +913,18 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (r)
 		goto out_slots;
 
-	/* actual memory is freed via old in kvm_free_physmem_slot below */
+	/* actual memory is freed via old in kvm_free_memslot below */
 	if (change == KVM_MR_DELETE) {
 		new.dirty_bitmap = NULL;
 		memset(&new.arch, 0, sizeof(new.arch));
 	}
 
 	update_memslots(slots, &new);
-	old_memslots = install_new_memslots(kvm, slots);
+	old_memslots = install_new_memslots(kvm, as_id, slots);
 
-	kvm_arch_commit_memory_region(kvm, mem, &old, change);
+	kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);
 
-	kvm_free_physmem_slot(kvm, &old, &new);
+	kvm_free_memslot(kvm, &old, &new);
 	kvfree(old_memslots);
 
 	/*
@@ -931,14 +946,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
 out_slots:
 	kvfree(slots);
 out_free:
-	kvm_free_physmem_slot(kvm, &new, &old);
+	kvm_free_memslot(kvm, &new, &old);
 out:
 	return r;
 }
 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
 
 int kvm_set_memory_region(struct kvm *kvm,
-			  struct kvm_userspace_memory_region *mem)
+			  const struct kvm_userspace_memory_region *mem)
 {
 	int r;
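With multiple address spaces, the slot field of struct kvm_userspace_memory_region does double duty: the high 16 bits pick the address space and the low 16 bits remain the slot id, matching the decode above (as_id = mem->slot >> 16, id = (u16)mem->slot). A hedged user-space sketch of the encoding (error handling elided; assumes a kernel and headers that support more than one address space):

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Sketch: map host memory as slot 3 of address space 1. */
    int set_region(int vm_fd, void *host_mem, __u64 gpa, __u64 size)
    {
    	struct kvm_userspace_memory_region r;

    	memset(&r, 0, sizeof(r));
    	r.slot = (1 << 16) | 3;		/* as_id = 1, id = 3 */
    	r.guest_phys_addr = gpa;
    	r.memory_size = size;
    	r.userspace_addr = (unsigned long)host_mem;
    	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &r);
    }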
@@ -952,24 +967,29 @@ EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 					  struct kvm_userspace_memory_region *mem)
 {
-	if (mem->slot >= KVM_USER_MEM_SLOTS)
+	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
 		return -EINVAL;
+
 	return kvm_set_memory_region(kvm, mem);
 }
 
 int kvm_get_dirty_log(struct kvm *kvm,
 			struct kvm_dirty_log *log, int *is_dirty)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
-	int r, i;
+	int r, i, as_id, id;
 	unsigned long n;
 	unsigned long any = 0;
 
 	r = -EINVAL;
-	if (log->slot >= KVM_USER_MEM_SLOTS)
+	as_id = log->slot >> 16;
+	id = (u16)log->slot;
+	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
 		goto out;
 
-	memslot = id_to_memslot(kvm->memslots, log->slot);
+	slots = __kvm_memslots(kvm, as_id);
+	memslot = id_to_memslot(slots, id);
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
@@ -1018,17 +1038,21 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
 int kvm_get_dirty_log_protect(struct kvm *kvm,
 			struct kvm_dirty_log *log, bool *is_dirty)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
-	int r, i;
+	int r, i, as_id, id;
 	unsigned long n;
 	unsigned long *dirty_bitmap;
 	unsigned long *dirty_bitmap_buffer;
 
 	r = -EINVAL;
-	if (log->slot >= KVM_USER_MEM_SLOTS)
+	as_id = log->slot >> 16;
+	id = (u16)log->slot;
+	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
 		goto out;
 
-	memslot = id_to_memslot(kvm->memslots, log->slot);
+	slots = __kvm_memslots(kvm, as_id);
+	memslot = id_to_memslot(slots, id);
 
 	dirty_bitmap = memslot->dirty_bitmap;
 	r = -ENOENT;
@@ -1091,6 +1115,11 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
+struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
+}
+
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
@@ -1166,6 +1195,12 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
+unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
+
 /*
  * If writable is set to false, the hva returned by this function is only
  * allowed to be read.
@@ -1188,6 +1223,13 @@ unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
 }
 
+unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
+{
+	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+
+	return gfn_to_hva_memslot_prot(slot, gfn, writable);
+}
+
 static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
 	unsigned long start, int write, struct page **page)
 {
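The dirty-log ioctls decode their slot argument the same way, so user space can harvest the bitmap of a slot in a non-default address space by packing the value identically; the new kvm_vcpu_* accessors meanwhile resolve gfns through kvm_vcpu_memslots(vcpu), i.e. through the vCPU's current address space rather than a VM-wide one. A small user-space sketch of the packed dirty-log call (hedged; bitmap allocation and sizing elided):

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Sketch: fetch the dirty bitmap for slot 3 of address space 1. */
    int get_dirty(int vm_fd, void *bitmap)
    {
    	struct kvm_dirty_log log;

    	memset(&log, 0, sizeof(log));
    	log.slot = (1 << 16) | 3;	/* same packing as mem->slot */
    	log.dirty_bitmap = bitmap;
    	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
    }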
@@ -1355,9 +1397,8 @@ exit:
 	return pfn;
 }
 
-static pfn_t
-__gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
-		     bool *async, bool write_fault, bool *writable)
+pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
+			   bool *async, bool write_fault, bool *writable)
 {
 	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
 
@@ -1376,65 +1417,59 @@ __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
 	return hva_to_pfn(addr, atomic, async, write_fault, writable);
 }
+EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
 
-static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
-			  bool write_fault, bool *writable)
+pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+		      bool *writable)
 {
-	struct kvm_memory_slot *slot;
-
-	if (async)
-		*async = false;
-
-	slot = gfn_to_memslot(kvm, gfn);
-
-	return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault,
-				    writable);
+	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
+				    write_fault, writable);
 }
+EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
 
-pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
+pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
+	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
 }
-EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
+EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
 
-pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
-		       bool write_fault, bool *writable)
+pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
+	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
 }
-EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
+EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
 
-pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
+	return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
 }
-EXPORT_SYMBOL_GPL(gfn_to_pfn);
+EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
 
-pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
-		      bool *writable)
+pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
+	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
 }
-EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
 
-pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
-	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
+	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
 }
+EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
-pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
+pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
-	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
+	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
 }
-EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
 
-int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
-			    int nr_pages)
+int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
+			    struct page **pages, int nr_pages)
 {
 	unsigned long addr;
 	gfn_t entry;
 
-	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
+	addr = gfn_to_hva_many(slot, gfn, &entry);
 	if (kvm_is_error_hva(addr))
 		return -1;
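This refactor removes the __gfn_to_pfn() middleman (and the now-unused gfn_to_pfn_async()): every wrapper funnels straight into the exported __gfn_to_pfn_memslot(), so callers that already hold a memslot can skip a second lookup. A sketch of the resulting call shape, using only the signatures visible in the hunk above (kernel context assumed; the caller name is hypothetical):

    /* Hypothetical caller: translate a gfn once the slot is in hand. */
    static pfn_t example_translate(struct kvm *kvm, gfn_t gfn)
    {
    	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
    	bool writable;

    	return __gfn_to_pfn_memslot(slot, gfn, false /* atomic */,
    				    NULL /* async */, true /* write_fault */,
    				    &writable);
    }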
@@ -1468,6 +1503,16 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
+struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	pfn_t pfn;
+
+	pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
+
+	return kvm_pfn_to_page(pfn);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
+
 void kvm_release_page_clean(struct page *page)
 {
 	WARN_ON(is_error_page(page));
@@ -1530,13 +1575,13 @@ static int next_segment(unsigned long len, int offset)
 		return len;
 }
 
-int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
-			int len)
+static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
+				 void *data, int offset, int len)
 {
 	int r;
 	unsigned long addr;
 
-	addr = gfn_to_hva_prot(kvm, gfn, NULL);
+	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
 	r = __copy_from_user(data, (void __user *)addr + offset, len);
@@ -1544,8 +1589,25 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 		return -EFAULT;
 	return 0;
 }
+
+int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
+			int len)
+{
+	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+
+	return __kvm_read_guest_page(slot, gfn, data, offset, len);
+}
 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
 
+int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
+			     int offset, int len)
+{
+	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+
+	return __kvm_read_guest_page(slot, gfn, data, offset, len);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
+
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
@@ -1566,15 +1628,33 @@ int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest);
 
-int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
-			  unsigned long len)
+int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
 {
-	int r;
-	unsigned long addr;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
+	int seg;
 	int offset = offset_in_page(gpa);
+	int ret;
+
+	while ((seg = next_segment(len, offset)) != 0) {
+		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
+		if (ret < 0)
+			return ret;
+		offset = 0;
+		len -= seg;
+		data += seg;
+		++gfn;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
 
-	addr = gfn_to_hva_prot(kvm, gfn, NULL);
+static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
+				   void *data, int offset, unsigned long len)
+{
+	int r;
+	unsigned long addr;
+
+	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
 	pagefault_disable();
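kvm_vcpu_read_guest() splits a gpa-spanning access into page-local chunks; next_segment() (context above) caps each chunk at PAGE_SIZE - offset. A stand-alone model of the arithmetic, showing a 0x300-byte read that starts 0x100 bytes before a page boundary:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

    static unsigned long next_segment(unsigned long len, unsigned long offset)
    {
    	return len > PAGE_SIZE - offset ? PAGE_SIZE - offset : len;
    }

    int main(void)
    {
    	unsigned long offset = 0xF00, len = 0x300, seg;

    	while ((seg = next_segment(len, offset)) != 0) {
    		printf("chunk: 0x%lx bytes\n", seg);	/* 0x100, then 0x200 */
    		offset = 0;	/* later chunks start at a page boundary */
    		len -= seg;
    	}
    	return 0;
    }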
@@ -1584,25 +1664,63 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
 		return -EFAULT;
 	return 0;
 }
-EXPORT_SYMBOL(kvm_read_guest_atomic);
 
-int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
-			 int offset, int len)
+int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
+			  unsigned long len)
+{
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+	int offset = offset_in_page(gpa);
+
+	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
+}
+EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);
+
+int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
+			       void *data, unsigned long len)
+{
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	int offset = offset_in_page(gpa);
+
+	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
+
+static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
+				  const void *data, int offset, int len)
 {
 	int r;
 	unsigned long addr;
 
-	addr = gfn_to_hva(kvm, gfn);
+	addr = gfn_to_hva_memslot(memslot, gfn);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
 	r = __copy_to_user((void __user *)addr + offset, data, len);
 	if (r)
 		return -EFAULT;
-	mark_page_dirty(kvm, gfn);
+	mark_page_dirty_in_slot(memslot, gfn);
 	return 0;
 }
+
+int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
+			 const void *data, int offset, int len)
+{
+	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+
+	return __kvm_write_guest_page(slot, gfn, data, offset, len);
+}
 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
 
+int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+			      const void *data, int offset, int len)
+{
+	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+
+	return __kvm_write_guest_page(slot, gfn, data, offset, len);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
+
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 		    unsigned long len)
 {
@@ -1624,6 +1742,27 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 }
 EXPORT_SYMBOL_GPL(kvm_write_guest);
 
+int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
+			 unsigned long len)
+{
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	int seg;
+	int offset = offset_in_page(gpa);
+	int ret;
+
+	while ((seg = next_segment(len, offset)) != 0) {
+		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
+		if (ret < 0)
+			return ret;
+		offset = 0;
+		len -= seg;
+		data += seg;
+		++gfn;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
+
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len)
 {
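__kvm_write_guest_page() now marks the written page dirty through the memslot it already holds, rather than calling mark_page_dirty() and redoing the gfn-to-memslot lookup; the vcpu wrappers inherit dirty tracking in the vCPU's address space for free. A minimal usage-shaped sketch (kernel context assumed; the caller name is hypothetical):

    /* Hypothetical caller: patch a u32 into guest memory through the
     * vCPU's current address space; dirty logging happens inside. */
    static int example_poke(struct kvm_vcpu *vcpu, gpa_t gpa, u32 val)
    {
    	return kvm_vcpu_write_guest(vcpu, gpa, &val, sizeof(val));
    }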
@@ -1681,7 +1820,7 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	r = __copy_to_user((void __user *)ghc->hva, data, len);
 	if (r)
 		return -EFAULT;
-	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+	mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT);
 
 	return 0;
 }
@@ -1739,8 +1878,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest);
 
-static void mark_page_dirty_in_slot(struct kvm *kvm,
-				    struct kvm_memory_slot *memslot,
+static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
 				    gfn_t gfn)
 {
 	if (memslot && memslot->dirty_bitmap) {
@@ -1755,10 +1893,19 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	struct kvm_memory_slot *memslot;
 
 	memslot = gfn_to_memslot(kvm, gfn);
-	mark_page_dirty_in_slot(kvm, memslot, gfn);
+	mark_page_dirty_in_slot(memslot, gfn);
 }
 EXPORT_SYMBOL_GPL(mark_page_dirty);
 
+void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	struct kvm_memory_slot *memslot;
+
+	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	mark_page_dirty_in_slot(memslot, gfn);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
+
 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 {
 	if (kvm_arch_vcpu_runnable(vcpu)) {
@@ -2487,6 +2634,10 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
 	case KVM_CAP_IRQ_ROUTING:
 		return KVM_MAX_IRQ_ROUTES;
+#endif
+#if KVM_ADDRESS_SPACE_NUM > 1
+	case KVM_CAP_MULTI_ADDRESS_SPACE:
+		return KVM_ADDRESS_SPACE_NUM;
 #endif
 	default:
 		break;
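User space can probe for multiple address spaces through the generic extension check above: the ioctl returns KVM_ADDRESS_SPACE_NUM when the kernel was built with more than one, and falls through (typically to 0) otherwise. A hedged sketch (assumes headers that define the capability):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Sketch: number of address spaces this VM supports (0 if single). */
    int address_spaces(int vm_fd)
    {
    	int n = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MULTI_ADDRESS_SPACE);

    	return n < 0 ? 0 : n;
    }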
@@ -2882,18 +3033,12 @@ static int hardware_enable_all(void)
 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 			   void *v)
 {
-	int cpu = (long)v;
-
 	val &= ~CPU_TASKS_FROZEN;
 	switch (val) {
 	case CPU_DYING:
-		pr_info("kvm: disabling virtualization on CPU%d\n",
-			cpu);
 		hardware_disable();
 		break;
 	case CPU_STARTING:
-		pr_info("kvm: enabling virtualization on CPU%d\n",
-			cpu);
 		hardware_enable();
 		break;
 	}

diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 620e37f74..1dd087da6 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -155,6 +155,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 		list_add_tail(&kvg->node, &kv->group_list);
 		kvg->vfio_group = vfio_group;
 
+		kvm_arch_start_assignment(dev->kvm);
+
 		mutex_unlock(&kv->lock);
 
 		kvm_vfio_update_coherency(dev);
@@ -190,6 +192,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 			break;
 		}
 
+		kvm_arch_end_assignment(dev->kvm);
+
 		mutex_unlock(&kv->lock);
 
 		kvm_vfio_group_put_external_user(vfio_group);
@@ -239,6 +243,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
 		kvm_vfio_group_put_external_user(kvg->vfio_group);
 		list_del(&kvg->node);
 		kfree(kvg);
+		kvm_arch_end_assignment(dev->kvm);
 	}
 
 	kvm_vfio_update_coherency(dev);
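The three vfio.c hunks keep the arch-side assignment bookkeeping balanced: one kvm_arch_start_assignment() per VFIO group added, one kvm_arch_end_assignment() per group removed, and one per group still attached when the device is destroyed. A toy model of that invariant (the counter is an assumption about how an architecture might implement the hooks, not code from this patch):

    #include <assert.h>

    static int assigned;	/* assumed per-VM assignment count */

    static void start_assignment(void) { assigned++; }	/* group added */
    static void end_assignment(void)   { assigned--; }	/* group removed */

    int main(void)
    {
    	start_assignment();	/* add group A */
    	start_assignment();	/* add group B */
    	end_assignment();	/* user detaches group A */
    	end_assignment();	/* destroy path frees remaining group B */
    	assert(assigned == 0);	/* must balance at VM teardown */
    	return 0;
    }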