Diffstat (limited to 'virt/kvm/arm')
-rw-r--r-- | virt/kvm/arm/arch_timer.c   |  360
-rw-r--r-- | virt/kvm/arm/vgic-v2-emul.c |  856
-rw-r--r-- | virt/kvm/arm/vgic-v2.c      |  259
-rw-r--r-- | virt/kvm/arm/vgic-v3-emul.c | 1030
-rw-r--r-- | virt/kvm/arm/vgic-v3.c      |  287
-rw-r--r-- | virt/kvm/arm/vgic.c         | 2173
-rw-r--r-- | virt/kvm/arm/vgic.h         |  140
7 files changed, 5105 insertions, 0 deletions
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c new file mode 100644 index 000000000..98c95f2fc --- /dev/null +++ b/virt/kvm/arm/arch_timer.c @@ -0,0 +1,360 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/cpu.h> +#include <linux/of_irq.h> +#include <linux/kvm.h> +#include <linux/kvm_host.h> +#include <linux/interrupt.h> + +#include <clocksource/arm_arch_timer.h> +#include <asm/arch_timer.h> + +#include <kvm/arm_vgic.h> +#include <kvm/arm_arch_timer.h> + +static struct timecounter *timecounter; +static struct workqueue_struct *wqueue; +static unsigned int host_vtimer_irq; + +static cycle_t kvm_phys_timer_read(void) +{ + return timecounter->cc->read(timecounter->cc); +} + +static bool timer_is_armed(struct arch_timer_cpu *timer) +{ + return timer->armed; +} + +/* timer_arm: as in "arm the timer", not as in ARM the company */ +static void timer_arm(struct arch_timer_cpu *timer, u64 ns) +{ + timer->armed = true; + hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns), + HRTIMER_MODE_ABS); +} + +static void timer_disarm(struct arch_timer_cpu *timer) +{ + if (timer_is_armed(timer)) { + hrtimer_cancel(&timer->timer); + cancel_work_sync(&timer->expired); + timer->armed = false; + } +} + +static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu) +{ + int ret; + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + + timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK; + ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, + timer->irq->irq, + timer->irq->level); + WARN_ON(ret); +} + +static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) +{ + struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; + + /* + * We disable the timer in the world switch and let it be + * handled by kvm_timer_sync_hwstate(). Getting a timer + * interrupt at this point is a sure sign of some major + * breakage. + */ + pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu); + return IRQ_HANDLED; +} + +/* + * Work function for handling the backup timer that we schedule when a vcpu is + * no longer running, but had a timer programmed to fire in the future. + */ +static void kvm_timer_inject_irq_work(struct work_struct *work) +{ + struct kvm_vcpu *vcpu; + + vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired); + vcpu->arch.timer_cpu.armed = false; + + /* + * If the vcpu is blocked we want to wake it up so that it will see + * the timer has expired when entering the guest. 
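+ *
+ * A rough sketch of what that re-check amounts to, mirroring
+ * kvm_timer_should_fire() below (guest-time arithmetic only):
+ *
+ *	now  = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+ *	fire = (cntv_ctl & ARCH_TIMER_CTRL_ENABLE) &&
+ *	       !(cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
+ *	       timer->cntv_cval <= now;
+ *
+ * so the interrupt gets injected from kvm_timer_flush_hwstate() on the
+ * way back into the guest.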
+ */ + kvm_vcpu_kick(vcpu); +} + +static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt) +{ + struct arch_timer_cpu *timer; + timer = container_of(hrt, struct arch_timer_cpu, timer); + queue_work(wqueue, &timer->expired); + return HRTIMER_NORESTART; +} + +bool kvm_timer_should_fire(struct kvm_vcpu *vcpu) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + cycle_t cval, now; + + if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) || + !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE)) + return false; + + cval = timer->cntv_cval; + now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; + + return cval <= now; +} + +/** + * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu + * @vcpu: The vcpu pointer + * + * Disarm any pending soft timers, since the world-switch code will write the + * virtual timer state back to the physical CPU. + */ +void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + + /* + * We're about to run this vcpu again, so there is no need to + * keep the background timer running, as we're about to + * populate the CPU timer again. + */ + timer_disarm(timer); + + /* + * If the timer expired while we were not scheduled, now is the time + * to inject it. + */ + if (kvm_timer_should_fire(vcpu)) + kvm_timer_inject_irq(vcpu); +} + +/** + * kvm_timer_sync_hwstate - sync timer state from cpu + * @vcpu: The vcpu pointer + * + * Check if the virtual timer was armed and either schedule a corresponding + * soft timer or inject directly if already expired. + */ +void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + cycle_t cval, now; + u64 ns; + + BUG_ON(timer_is_armed(timer)); + + if (kvm_timer_should_fire(vcpu)) { + /* + * Timer has already expired while we were not + * looking. Inject the interrupt and carry on. + */ + kvm_timer_inject_irq(vcpu); + return; + } + + cval = timer->cntv_cval; + now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; + + ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask, + &timecounter->frac); + timer_arm(timer, ns); +} + +void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, + const struct kvm_irq_level *irq) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + + /* + * The vcpu timer irq number cannot be determined in + * kvm_timer_vcpu_init() because it is called much before + * kvm_vcpu_set_target(). To handle this, we determine + * vcpu timer irq number when the vcpu is reset. 
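+ *
+ * An illustrative caller-side value (the PPI number 27 is a common
+ * choice for the virtual timer, but it is an assumption here, not
+ * something this file defines):
+ *
+ *	static const struct kvm_irq_level vtimer = {
+ *		.irq   = 27,
+ *		.level = 1,
+ *	};
+ *	kvm_timer_vcpu_reset(vcpu, &vtimer);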
+ */ + timer->irq = irq; +} + +void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + + INIT_WORK(&timer->expired, kvm_timer_inject_irq_work); + hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + timer->timer.function = kvm_timer_expire; +} + +static void kvm_timer_init_interrupt(void *info) +{ + enable_percpu_irq(host_vtimer_irq, 0); +} + +int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + + switch (regid) { + case KVM_REG_ARM_TIMER_CTL: + timer->cntv_ctl = value; + break; + case KVM_REG_ARM_TIMER_CNT: + vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value; + break; + case KVM_REG_ARM_TIMER_CVAL: + timer->cntv_cval = value; + break; + default: + return -1; + } + return 0; +} + +u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + + switch (regid) { + case KVM_REG_ARM_TIMER_CTL: + return timer->cntv_ctl; + case KVM_REG_ARM_TIMER_CNT: + return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; + case KVM_REG_ARM_TIMER_CVAL: + return timer->cntv_cval; + } + return (u64)-1; +} + +static int kvm_timer_cpu_notify(struct notifier_block *self, + unsigned long action, void *cpu) +{ + switch (action) { + case CPU_STARTING: + case CPU_STARTING_FROZEN: + kvm_timer_init_interrupt(NULL); + break; + case CPU_DYING: + case CPU_DYING_FROZEN: + disable_percpu_irq(host_vtimer_irq); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block kvm_timer_cpu_nb = { + .notifier_call = kvm_timer_cpu_notify, +}; + +static const struct of_device_id arch_timer_of_match[] = { + { .compatible = "arm,armv7-timer", }, + { .compatible = "arm,armv8-timer", }, + {}, +}; + +int kvm_timer_hyp_init(void) +{ + struct device_node *np; + unsigned int ppi; + int err; + + timecounter = arch_timer_get_timecounter(); + if (!timecounter) + return -ENODEV; + + np = of_find_matching_node(NULL, arch_timer_of_match); + if (!np) { + kvm_err("kvm_arch_timer: can't find DT node\n"); + return -ENODEV; + } + + ppi = irq_of_parse_and_map(np, 2); + if (!ppi) { + kvm_err("kvm_arch_timer: no virtual timer interrupt\n"); + err = -EINVAL; + goto out; + } + + err = request_percpu_irq(ppi, kvm_arch_timer_handler, + "kvm guest timer", kvm_get_running_vcpus()); + if (err) { + kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n", + ppi, err); + goto out; + } + + host_vtimer_irq = ppi; + + err = __register_cpu_notifier(&kvm_timer_cpu_nb); + if (err) { + kvm_err("Cannot register timer CPU notifier\n"); + goto out_free; + } + + wqueue = create_singlethread_workqueue("kvm_arch_timer"); + if (!wqueue) { + err = -ENOMEM; + goto out_free; + } + + kvm_info("%s IRQ%d\n", np->name, ppi); + on_each_cpu(kvm_timer_init_interrupt, NULL, 1); + + goto out; +out_free: + free_percpu_irq(ppi, kvm_get_running_vcpus()); +out: + of_node_put(np); + return err; +} + +void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + + timer_disarm(timer); +} + +void kvm_timer_enable(struct kvm *kvm) +{ + if (kvm->arch.timer.enabled) + return; + + /* + * There is a potential race here between VCPUs starting for the first + * time, which may be enabling the timer multiple times. That doesn't + * hurt though, because we're just setting a variable to the same + * variable that it already was. 
The important thing is that all + * VCPUs have the enabled variable set, before entering the guest, if + * the arch timers are enabled. + */ + if (timecounter && wqueue) + kvm->arch.timer.enabled = 1; +} + +void kvm_timer_init(struct kvm *kvm) +{ + kvm->arch.timer.cntvoff = kvm_phys_timer_read(); +} diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c new file mode 100644 index 000000000..13907970d --- /dev/null +++ b/virt/kvm/arm/vgic-v2-emul.c @@ -0,0 +1,856 @@ +/* + * Contains GICv2 specific emulation code, was in vgic.c before. + * + * Copyright (C) 2012 ARM Ltd. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/cpu.h> +#include <linux/kvm.h> +#include <linux/kvm_host.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/uaccess.h> + +#include <linux/irqchip/arm-gic.h> + +#include <asm/kvm_emulate.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_mmu.h> + +#include "vgic.h" + +#define GICC_ARCH_VERSION_V2 0x2 + +static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); +static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi) +{ + return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi; +} + +static bool handle_mmio_misc(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, phys_addr_t offset) +{ + u32 reg; + u32 word_offset = offset & 3; + + switch (offset & ~3) { + case 0: /* GICD_CTLR */ + reg = vcpu->kvm->arch.vgic.enabled; + vgic_reg_access(mmio, ®, word_offset, + ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); + if (mmio->is_write) { + vcpu->kvm->arch.vgic.enabled = reg & 1; + vgic_update_state(vcpu->kvm); + return true; + } + break; + + case 4: /* GICD_TYPER */ + reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; + reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1; + vgic_reg_access(mmio, ®, word_offset, + ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); + break; + + case 8: /* GICD_IIDR */ + reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0); + vgic_reg_access(mmio, ®, word_offset, + ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); + break; + } + + return false; +} + +static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, + vcpu->vcpu_id, ACCESS_WRITE_SETBIT); +} + +static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, + vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT); +} + +static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset, + vcpu->vcpu_id); +} + +static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset, + vcpu->vcpu_id); +} + +static bool 
handle_mmio_set_active_reg(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset, + vcpu->vcpu_id); +} + +static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset, + vcpu->vcpu_id); +} + +static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority, + vcpu->vcpu_id, offset); + vgic_reg_access(mmio, reg, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); + return false; +} + +#define GICD_ITARGETSR_SIZE 32 +#define GICD_CPUTARGETS_BITS 8 +#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS) +static u32 vgic_get_target_reg(struct kvm *kvm, int irq) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + int i; + u32 val = 0; + + irq -= VGIC_NR_PRIVATE_IRQS; + + for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) + val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8); + + return val; +} + +static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + struct kvm_vcpu *vcpu; + int i, c; + unsigned long *bmap; + u32 target; + + irq -= VGIC_NR_PRIVATE_IRQS; + + /* + * Pick the LSB in each byte. This ensures we target exactly + * one vcpu per IRQ. If the byte is null, assume we target + * CPU0. + */ + for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) { + int shift = i * GICD_CPUTARGETS_BITS; + + target = ffs((val >> shift) & 0xffU); + target = target ? (target - 1) : 0; + dist->irq_spi_cpu[irq + i] = target; + kvm_for_each_vcpu(c, vcpu, kvm) { + bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]); + if (c == target) + set_bit(irq + i, bmap); + else + clear_bit(irq + i, bmap); + } + } +} + +static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + u32 reg; + + /* We treat the banked interrupts targets as read-only */ + if (offset < 32) { + u32 roreg; + + roreg = 1 << vcpu->vcpu_id; + roreg |= roreg << 8; + roreg |= roreg << 16; + + vgic_reg_access(mmio, &roreg, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); + return false; + } + + reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U); + vgic_reg_access(mmio, ®, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); + if (mmio->is_write) { + vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U); + vgic_update_state(vcpu->kvm); + return true; + } + + return false; +} + +static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, phys_addr_t offset) +{ + u32 *reg; + + reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg, + vcpu->vcpu_id, offset >> 1); + + return vgic_handle_cfg_reg(reg, mmio, offset); +} + +static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, phys_addr_t offset) +{ + u32 reg; + + vgic_reg_access(mmio, ®, offset, + ACCESS_READ_RAZ | ACCESS_WRITE_VALUE); + if (mmio->is_write) { + vgic_dispatch_sgi(vcpu, reg); + vgic_update_state(vcpu->kvm); + return true; + } + + return false; +} + +/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */ +static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + int sgi; + int min_sgi = (offset & ~0x3); + int max_sgi = min_sgi + 3; + int vcpu_id = vcpu->vcpu_id; + u32 reg = 0; + + /* Copy 
source SGIs from distributor side */ + for (sgi = min_sgi; sgi <= max_sgi; sgi++) { + u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi); + + reg |= ((u32)sources) << (8 * (sgi - min_sgi)); + } + + mmio_data_write(mmio, ~0, reg); + return false; +} + +static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset, bool set) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + int sgi; + int min_sgi = (offset & ~0x3); + int max_sgi = min_sgi + 3; + int vcpu_id = vcpu->vcpu_id; + u32 reg; + bool updated = false; + + reg = mmio_data_read(mmio, ~0); + + /* Clear pending SGIs on the distributor */ + for (sgi = min_sgi; sgi <= max_sgi; sgi++) { + u8 mask = reg >> (8 * (sgi - min_sgi)); + u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi); + + if (set) { + if ((*src & mask) != mask) + updated = true; + *src |= mask; + } else { + if (*src & mask) + updated = true; + *src &= ~mask; + } + } + + if (updated) + vgic_update_state(vcpu->kvm); + + return updated; +} + +static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + if (!mmio->is_write) + return read_set_clear_sgi_pend_reg(vcpu, mmio, offset); + else + return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true); +} + +static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + if (!mmio->is_write) + return read_set_clear_sgi_pend_reg(vcpu, mmio, offset); + else + return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false); +} + +static const struct vgic_io_range vgic_dist_ranges[] = { + { + .base = GIC_DIST_CTRL, + .len = 12, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_misc, + }, + { + .base = GIC_DIST_IGROUP, + .len = VGIC_MAX_IRQS / 8, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_raz_wi, + }, + { + .base = GIC_DIST_ENABLE_SET, + .len = VGIC_MAX_IRQS / 8, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_set_enable_reg, + }, + { + .base = GIC_DIST_ENABLE_CLEAR, + .len = VGIC_MAX_IRQS / 8, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_clear_enable_reg, + }, + { + .base = GIC_DIST_PENDING_SET, + .len = VGIC_MAX_IRQS / 8, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_set_pending_reg, + }, + { + .base = GIC_DIST_PENDING_CLEAR, + .len = VGIC_MAX_IRQS / 8, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_clear_pending_reg, + }, + { + .base = GIC_DIST_ACTIVE_SET, + .len = VGIC_MAX_IRQS / 8, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_set_active_reg, + }, + { + .base = GIC_DIST_ACTIVE_CLEAR, + .len = VGIC_MAX_IRQS / 8, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_clear_active_reg, + }, + { + .base = GIC_DIST_PRI, + .len = VGIC_MAX_IRQS, + .bits_per_irq = 8, + .handle_mmio = handle_mmio_priority_reg, + }, + { + .base = GIC_DIST_TARGET, + .len = VGIC_MAX_IRQS, + .bits_per_irq = 8, + .handle_mmio = handle_mmio_target_reg, + }, + { + .base = GIC_DIST_CONFIG, + .len = VGIC_MAX_IRQS / 4, + .bits_per_irq = 2, + .handle_mmio = handle_mmio_cfg_reg, + }, + { + .base = GIC_DIST_SOFTINT, + .len = 4, + .handle_mmio = handle_mmio_sgi_reg, + }, + { + .base = GIC_DIST_SGI_PENDING_CLEAR, + .len = VGIC_NR_SGIS, + .handle_mmio = handle_mmio_sgi_clear, + }, + { + .base = GIC_DIST_SGI_PENDING_SET, + .len = VGIC_NR_SGIS, + .handle_mmio = handle_mmio_sgi_set, + }, + {} +}; + +static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) +{ + struct kvm *kvm = vcpu->kvm; + struct vgic_dist *dist = &kvm->arch.vgic; + int nrcpus = atomic_read(&kvm->online_vcpus); + u8 
target_cpus; + int sgi, mode, c, vcpu_id; + + vcpu_id = vcpu->vcpu_id; + + sgi = reg & 0xf; + target_cpus = (reg >> 16) & 0xff; + mode = (reg >> 24) & 3; + + switch (mode) { + case 0: + if (!target_cpus) + return; + break; + + case 1: + target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff; + break; + + case 2: + target_cpus = 1 << vcpu_id; + break; + } + + kvm_for_each_vcpu(c, vcpu, kvm) { + if (target_cpus & 1) { + /* Flag the SGI as pending */ + vgic_dist_irq_set_pending(vcpu, sgi); + *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id; + kvm_debug("SGI%d from CPU%d to CPU%d\n", + sgi, vcpu_id, c); + } + + target_cpus >>= 1; + } +} + +static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + unsigned long sources; + int vcpu_id = vcpu->vcpu_id; + int c; + + sources = *vgic_get_sgi_sources(dist, vcpu_id, irq); + + for_each_set_bit(c, &sources, dist->nr_cpus) { + if (vgic_queue_irq(vcpu, c, irq)) + clear_bit(c, &sources); + } + + *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources; + + /* + * If the sources bitmap has been cleared it means that we + * could queue all the SGIs onto link registers (see the + * clear_bit above), and therefore we are done with them in + * our emulated gic and can get rid of them. + */ + if (!sources) { + vgic_dist_irq_clear_pending(vcpu, irq); + vgic_cpu_irq_clear(vcpu, irq); + return true; + } + + return false; +} + +/** + * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs + * @kvm: pointer to the kvm struct + * + * Map the virtual CPU interface into the VM before running any VCPUs. We + * can't do this at creation time, because user space must first set the + * virtual CPU interface address in the guest physical address space. + */ +static int vgic_v2_map_resources(struct kvm *kvm, + const struct vgic_params *params) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + int ret = 0; + + if (!irqchip_in_kernel(kvm)) + return 0; + + mutex_lock(&kvm->lock); + + if (vgic_ready(kvm)) + goto out; + + if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) || + IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) { + kvm_err("Need to set vgic cpu and dist addresses first\n"); + ret = -ENXIO; + goto out; + } + + vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base, + KVM_VGIC_V2_DIST_SIZE, + vgic_dist_ranges, -1, &dist->dist_iodev); + + /* + * Initialize the vgic if this hasn't already been done on demand by + * accessing the vgic state from userspace. 
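+ *
+ * For reference, a hedged userspace sketch of how the base addresses
+ * checked above get set in the first place (vgic_fd and the guest
+ * physical address are assumptions, not defined in this file):
+ *
+ *	__u64 dist_base = 0x08000000;
+ *	struct kvm_device_attr attr = {
+ *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
+ *		.attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
+ *		.addr  = (__u64)&dist_base,
+ *	};
+ *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);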
+ */ + ret = vgic_init(kvm); + if (ret) { + kvm_err("Unable to allocate maps\n"); + goto out_unregister; + } + + ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base, + params->vcpu_base, KVM_VGIC_V2_CPU_SIZE, + true); + if (ret) { + kvm_err("Unable to remap VGIC CPU to VCPU\n"); + goto out_unregister; + } + + dist->ready = true; + goto out; + +out_unregister: + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev); + +out: + if (ret) + kvm_vgic_destroy(kvm); + mutex_unlock(&kvm->lock); + return ret; +} + +static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source; +} + +static int vgic_v2_init_model(struct kvm *kvm) +{ + int i; + + for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4) + vgic_set_target_reg(kvm, 0, i); + + return 0; +} + +void vgic_v2_init_emulation(struct kvm *kvm) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + + dist->vm_ops.queue_sgi = vgic_v2_queue_sgi; + dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source; + dist->vm_ops.init_model = vgic_v2_init_model; + dist->vm_ops.map_resources = vgic_v2_map_resources; + + kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS; +} + +static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, phys_addr_t offset) +{ + bool updated = false; + struct vgic_vmcr vmcr; + u32 *vmcr_field; + u32 reg; + + vgic_get_vmcr(vcpu, &vmcr); + + switch (offset & ~0x3) { + case GIC_CPU_CTRL: + vmcr_field = &vmcr.ctlr; + break; + case GIC_CPU_PRIMASK: + vmcr_field = &vmcr.pmr; + break; + case GIC_CPU_BINPOINT: + vmcr_field = &vmcr.bpr; + break; + case GIC_CPU_ALIAS_BINPOINT: + vmcr_field = &vmcr.abpr; + break; + default: + BUG(); + } + + if (!mmio->is_write) { + reg = *vmcr_field; + mmio_data_write(mmio, ~0, reg); + } else { + reg = mmio_data_read(mmio, ~0); + if (reg != *vmcr_field) { + *vmcr_field = reg; + vgic_set_vmcr(vcpu, &vmcr); + updated = true; + } + } + return updated; +} + +static bool handle_mmio_abpr(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, phys_addr_t offset) +{ + return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT); +} + +static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + u32 reg; + + if (mmio->is_write) + return false; + + /* GICC_IIDR */ + reg = (PRODUCT_ID_KVM << 20) | + (GICC_ARCH_VERSION_V2 << 16) | + (IMPLEMENTER_ARM << 0); + mmio_data_write(mmio, ~0, reg); + return false; +} + +/* + * CPU Interface Register accesses - these are not accessed by the VM, but by + * user space for saving and restoring VGIC state. 
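+ *
+ * A hedged sketch of such an access from user space (vgic_fd and cpuid
+ * are assumed to come from the surrounding VMM code; GIC_CPU_PRIMASK is
+ * the 0x04 register offset):
+ *
+ *	__u32 reg;
+ *	struct kvm_device_attr attr = {
+ *		.group = KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
+ *		.attr  = ((__u64)cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
+ *			 GIC_CPU_PRIMASK,
+ *		.addr  = (__u64)&reg,
+ *	};
+ *	ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);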
+ */ +static const struct vgic_io_range vgic_cpu_ranges[] = { + { + .base = GIC_CPU_CTRL, + .len = 12, + .handle_mmio = handle_cpu_mmio_misc, + }, + { + .base = GIC_CPU_ALIAS_BINPOINT, + .len = 4, + .handle_mmio = handle_mmio_abpr, + }, + { + .base = GIC_CPU_ACTIVEPRIO, + .len = 16, + .handle_mmio = handle_mmio_raz_wi, + }, + { + .base = GIC_CPU_IDENT, + .len = 4, + .handle_mmio = handle_cpu_mmio_ident, + }, +}; + +static int vgic_attr_regs_access(struct kvm_device *dev, + struct kvm_device_attr *attr, + u32 *reg, bool is_write) +{ + const struct vgic_io_range *r = NULL, *ranges; + phys_addr_t offset; + int ret, cpuid, c; + struct kvm_vcpu *vcpu, *tmp_vcpu; + struct vgic_dist *vgic; + struct kvm_exit_mmio mmio; + u32 data; + + offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; + cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >> + KVM_DEV_ARM_VGIC_CPUID_SHIFT; + + mutex_lock(&dev->kvm->lock); + + ret = vgic_init(dev->kvm); + if (ret) + goto out; + + if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) { + ret = -EINVAL; + goto out; + } + + vcpu = kvm_get_vcpu(dev->kvm, cpuid); + vgic = &dev->kvm->arch.vgic; + + mmio.len = 4; + mmio.is_write = is_write; + mmio.data = &data; + if (is_write) + mmio_data_write(&mmio, ~0, *reg); + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: + mmio.phys_addr = vgic->vgic_dist_base + offset; + ranges = vgic_dist_ranges; + break; + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: + mmio.phys_addr = vgic->vgic_cpu_base + offset; + ranges = vgic_cpu_ranges; + break; + default: + BUG(); + } + r = vgic_find_range(ranges, 4, offset); + + if (unlikely(!r || !r->handle_mmio)) { + ret = -ENXIO; + goto out; + } + + + spin_lock(&vgic->lock); + + /* + * Ensure that no other VCPU is running by checking the vcpu->cpu + * field. If no other VPCUs are running we can safely access the VGIC + * state, because even if another VPU is run after this point, that + * VCPU will not touch the vgic state, because it will block on + * getting the vgic->lock in kvm_vgic_sync_hwstate(). + */ + kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) { + if (unlikely(tmp_vcpu->cpu != -1)) { + ret = -EBUSY; + goto out_vgic_unlock; + } + } + + /* + * Move all pending IRQs from the LRs on all VCPUs so the pending + * state can be properly represented in the register state accessible + * through this API. 
+ */ + kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) + vgic_unqueue_irqs(tmp_vcpu); + + offset -= r->base; + r->handle_mmio(vcpu, &mmio, offset); + + if (!is_write) + *reg = mmio_data_read(&mmio, ~0); + + ret = 0; +out_vgic_unlock: + spin_unlock(&vgic->lock); +out: + mutex_unlock(&dev->kvm->lock); + return ret; +} + +static int vgic_v2_create(struct kvm_device *dev, u32 type) +{ + return kvm_vgic_create(dev->kvm, type); +} + +static void vgic_v2_destroy(struct kvm_device *dev) +{ + kfree(dev); +} + +static int vgic_v2_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + int ret; + + ret = vgic_set_common_attr(dev, attr); + if (ret != -ENXIO) + return ret; + + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: { + u32 __user *uaddr = (u32 __user *)(long)attr->addr; + u32 reg; + + if (get_user(reg, uaddr)) + return -EFAULT; + + return vgic_attr_regs_access(dev, attr, ®, true); + } + + } + + return -ENXIO; +} + +static int vgic_v2_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + int ret; + + ret = vgic_get_common_attr(dev, attr); + if (ret != -ENXIO) + return ret; + + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: { + u32 __user *uaddr = (u32 __user *)(long)attr->addr; + u32 reg = 0; + + ret = vgic_attr_regs_access(dev, attr, ®, false); + if (ret) + return ret; + return put_user(reg, uaddr); + } + + } + + return -ENXIO; +} + +static int vgic_v2_has_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + phys_addr_t offset; + + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_ADDR: + switch (attr->attr) { + case KVM_VGIC_V2_ADDR_TYPE_DIST: + case KVM_VGIC_V2_ADDR_TYPE_CPU: + return 0; + } + break; + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: + offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; + return vgic_has_attr_regs(vgic_dist_ranges, offset); + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: + offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; + return vgic_has_attr_regs(vgic_cpu_ranges, offset); + case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: + return 0; + case KVM_DEV_ARM_VGIC_GRP_CTRL: + switch (attr->attr) { + case KVM_DEV_ARM_VGIC_CTRL_INIT: + return 0; + } + } + return -ENXIO; +} + +struct kvm_device_ops kvm_arm_vgic_v2_ops = { + .name = "kvm-arm-vgic-v2", + .create = vgic_v2_create, + .destroy = vgic_v2_destroy, + .set_attr = vgic_v2_set_attr, + .get_attr = vgic_v2_get_attr, + .has_attr = vgic_v2_has_attr, +}; diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c new file mode 100644 index 000000000..f9b9c7c51 --- /dev/null +++ b/virt/kvm/arm/vgic-v2.c @@ -0,0 +1,259 @@ +/* + * Copyright (C) 2012,2013 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/cpu.h> +#include <linux/kvm.h> +#include <linux/kvm_host.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#include <linux/irqchip/arm-gic.h> + +#include <asm/kvm_emulate.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_mmu.h> + +static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr) +{ + struct vgic_lr lr_desc; + u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr]; + + lr_desc.irq = val & GICH_LR_VIRTUALID; + if (lr_desc.irq <= 15) + lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7; + else + lr_desc.source = 0; + lr_desc.state = 0; + + if (val & GICH_LR_PENDING_BIT) + lr_desc.state |= LR_STATE_PENDING; + if (val & GICH_LR_ACTIVE_BIT) + lr_desc.state |= LR_STATE_ACTIVE; + if (val & GICH_LR_EOI) + lr_desc.state |= LR_EOI_INT; + + return lr_desc; +} + +static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr, + struct vgic_lr lr_desc) +{ + u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | lr_desc.irq; + + if (lr_desc.state & LR_STATE_PENDING) + lr_val |= GICH_LR_PENDING_BIT; + if (lr_desc.state & LR_STATE_ACTIVE) + lr_val |= GICH_LR_ACTIVE_BIT; + if (lr_desc.state & LR_EOI_INT) + lr_val |= GICH_LR_EOI; + + vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val; +} + +static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, + struct vgic_lr lr_desc) +{ + if (!(lr_desc.state & LR_STATE_MASK)) + vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr); + else + vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr); +} + +static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) +{ + return vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr; +} + +static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu) +{ + return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr; +} + +static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0; +} + +static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu) +{ + u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr; + u32 ret = 0; + + if (misr & GICH_MISR_EOI) + ret |= INT_STATUS_EOI; + if (misr & GICH_MISR_U) + ret |= INT_STATUS_UNDERFLOW; + + return ret; +} + +static void vgic_v2_enable_underflow(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE; +} + +static void vgic_v2_disable_underflow(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE; +} + +static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) +{ + u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr; + + vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT; + vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT; + vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT; + vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT; +} + +static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) +{ + u32 vmcr; + + vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK; + vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK; + vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK; + vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK; + + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr; +} + +static void vgic_v2_enable(struct kvm_vcpu *vcpu) +{ + /* + * By forcing VMCR to zero, the GIC will restore the binary + * points to their reset values. 
Anything else resets to zero + * anyway. + */ + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0; + + /* Get the show on the road... */ + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN; +} + +static const struct vgic_ops vgic_v2_ops = { + .get_lr = vgic_v2_get_lr, + .set_lr = vgic_v2_set_lr, + .sync_lr_elrsr = vgic_v2_sync_lr_elrsr, + .get_elrsr = vgic_v2_get_elrsr, + .get_eisr = vgic_v2_get_eisr, + .clear_eisr = vgic_v2_clear_eisr, + .get_interrupt_status = vgic_v2_get_interrupt_status, + .enable_underflow = vgic_v2_enable_underflow, + .disable_underflow = vgic_v2_disable_underflow, + .get_vmcr = vgic_v2_get_vmcr, + .set_vmcr = vgic_v2_set_vmcr, + .enable = vgic_v2_enable, +}; + +static struct vgic_params vgic_v2_params; + +/** + * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT + * @node: pointer to the DT node + * @ops: address of a pointer to the GICv2 operations + * @params: address of a pointer to HW-specific parameters + * + * Returns 0 if a GICv2 has been found, with the low level operations + * in *ops and the HW parameters in *params. Returns an error code + * otherwise. + */ +int vgic_v2_probe(struct device_node *vgic_node, + const struct vgic_ops **ops, + const struct vgic_params **params) +{ + int ret; + struct resource vctrl_res; + struct resource vcpu_res; + struct vgic_params *vgic = &vgic_v2_params; + + vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0); + if (!vgic->maint_irq) { + kvm_err("error getting vgic maintenance irq from DT\n"); + ret = -ENXIO; + goto out; + } + + ret = of_address_to_resource(vgic_node, 2, &vctrl_res); + if (ret) { + kvm_err("Cannot obtain GICH resource\n"); + goto out; + } + + vgic->vctrl_base = of_iomap(vgic_node, 2); + if (!vgic->vctrl_base) { + kvm_err("Cannot ioremap GICH\n"); + ret = -ENOMEM; + goto out; + } + + vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR); + vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1; + + ret = create_hyp_io_mappings(vgic->vctrl_base, + vgic->vctrl_base + resource_size(&vctrl_res), + vctrl_res.start); + if (ret) { + kvm_err("Cannot map VCTRL into hyp\n"); + goto out_unmap; + } + + if (of_address_to_resource(vgic_node, 3, &vcpu_res)) { + kvm_err("Cannot obtain GICV resource\n"); + ret = -ENXIO; + goto out_unmap; + } + + if (!PAGE_ALIGNED(vcpu_res.start)) { + kvm_err("GICV physical address 0x%llx not page aligned\n", + (unsigned long long)vcpu_res.start); + ret = -ENXIO; + goto out_unmap; + } + + if (!PAGE_ALIGNED(resource_size(&vcpu_res))) { + kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n", + (unsigned long long)resource_size(&vcpu_res), + PAGE_SIZE); + ret = -ENXIO; + goto out_unmap; + } + + vgic->can_emulate_gicv2 = true; + kvm_register_device_ops(&kvm_arm_vgic_v2_ops, KVM_DEV_TYPE_ARM_VGIC_V2); + + vgic->vcpu_base = vcpu_res.start; + + kvm_info("%s@%llx IRQ%d\n", vgic_node->name, + vctrl_res.start, vgic->maint_irq); + + vgic->type = VGIC_V2; + vgic->max_gic_vcpus = VGIC_V2_MAX_CPUS; + *ops = &vgic_v2_ops; + *params = vgic; + goto out; + +out_unmap: + iounmap(vgic->vctrl_base); +out: + of_node_put(vgic_node); + return ret; +} diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c new file mode 100644 index 000000000..e9c3a7a83 --- /dev/null +++ b/virt/kvm/arm/vgic-v3-emul.c @@ -0,0 +1,1030 @@ +/* + * GICv3 distributor and redistributor emulation + * + * GICv3 emulation is currently only supported on a GICv3 host (because + * we rely on the hardware's CPU interface virtualization support), but + * supports both hardware with or without the optional GICv2 
backwards + * compatibility features. + * + * Limitations of the emulation: + * (RAZ/WI: read as zero, write ignore, RAO/WI: read as one, write ignore) + * - We do not support LPIs (yet). TYPER.LPIS is reported as 0 and is RAZ/WI. + * - We do not support the message based interrupts (MBIs) triggered by + * writes to the GICD_{SET,CLR}SPI_* registers. TYPER.MBIS is reported as 0. + * - We do not support the (optional) backwards compatibility feature. + * GICD_CTLR.ARE resets to 1 and is RAO/WI. If the _host_ GIC supports + * the compatiblity feature, you can use a GICv2 in the guest, though. + * - We only support a single security state. GICD_CTLR.DS is 1 and is RAO/WI. + * - Priorities are not emulated (same as the GICv2 emulation). Linux + * as a guest is fine with this, because it does not use priorities. + * - We only support Group1 interrupts. Again Linux uses only those. + * + * Copyright (C) 2014 ARM Ltd. + * Author: Andre Przywara <andre.przywara@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/cpu.h> +#include <linux/kvm.h> +#include <linux/kvm_host.h> +#include <linux/interrupt.h> + +#include <linux/irqchip/arm-gic-v3.h> +#include <kvm/arm_vgic.h> + +#include <asm/kvm_emulate.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_mmu.h> + +#include "vgic.h" + +static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, phys_addr_t offset) +{ + u32 reg = 0xffffffff; + + vgic_reg_access(mmio, ®, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); + + return false; +} + +static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, phys_addr_t offset) +{ + u32 reg = 0; + + /* + * Force ARE and DS to 1, the guest cannot change this. + * For the time being we only support Group1 interrupts. + */ + if (vcpu->kvm->arch.vgic.enabled) + reg = GICD_CTLR_ENABLE_SS_G1; + reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS; + + vgic_reg_access(mmio, ®, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); + if (mmio->is_write) { + if (reg & GICD_CTLR_ENABLE_SS_G0) + kvm_info("guest tried to enable unsupported Group0 interrupts\n"); + vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1); + vgic_update_state(vcpu->kvm); + return true; + } + return false; +} + +/* + * As this implementation does not provide compatibility + * with GICv2 (ARE==1), we report zero CPUs in bits [5..7]. + * Also LPIs and MBIs are not supported, so we set the respective bits to 0. + * Also we report at most 2**10=1024 interrupt IDs (to match 1024 SPIs). 
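+ *
+ * Worked example (illustrative): with nr_irqs = 256, the computation in
+ * handle_mmio_typer() below gives (256 >> 5) - 1 = 7, i.e.
+ * ITLinesNumber = 7, advertising 32 * (7 + 1) = 256 interrupt IDs.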
+ */ +#define INTERRUPT_ID_BITS 10 +static bool handle_mmio_typer(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, phys_addr_t offset) +{ + u32 reg; + + reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1; + + reg |= (INTERRUPT_ID_BITS - 1) << 19; + + vgic_reg_access(mmio, ®, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); + + return false; +} + +static bool handle_mmio_iidr(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, phys_addr_t offset) +{ + u32 reg; + + reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0); + vgic_reg_access(mmio, ®, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); + + return false; +} + +static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8)) + return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, + vcpu->vcpu_id, + ACCESS_WRITE_SETBIT); + + vgic_reg_access(mmio, NULL, offset, + ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); + return false; +} + +static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8)) + return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, + vcpu->vcpu_id, + ACCESS_WRITE_CLEARBIT); + + vgic_reg_access(mmio, NULL, offset, + ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); + return false; +} + +static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8)) + return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset, + vcpu->vcpu_id); + + vgic_reg_access(mmio, NULL, offset, + ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); + return false; +} + +static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8)) + return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset, + vcpu->vcpu_id); + + vgic_reg_access(mmio, NULL, offset, + ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); + return false; +} + +static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + u32 *reg; + + if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) { + vgic_reg_access(mmio, NULL, offset, + ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); + return false; + } + + reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority, + vcpu->vcpu_id, offset); + vgic_reg_access(mmio, reg, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); + return false; +} + +static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + u32 *reg; + + if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) { + vgic_reg_access(mmio, NULL, offset, + ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); + return false; + } + + reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg, + vcpu->vcpu_id, offset >> 1); + + return vgic_handle_cfg_reg(reg, mmio, offset); +} + +/* + * We use a compressed version of the MPIDR (all 32 bits in one 32-bit word) + * when we store the target MPIDR written by the guest. 
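+ *
+ * Worked example (illustrative): an MPIDR of 0x0100020304, that is
+ * Aff3 = 0x01 (bits [39:32]), Aff2 = 0x02, Aff1 = 0x03, Aff0 = 0x04,
+ * compresses to the single word 0x01020304; uncompress_mpidr()
+ * restores the original bit layout exactly.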
+ */ +static u32 compress_mpidr(unsigned long mpidr) +{ + u32 ret; + + ret = MPIDR_AFFINITY_LEVEL(mpidr, 0); + ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8; + ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16; + ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24; + + return ret; +} + +static unsigned long uncompress_mpidr(u32 value) +{ + unsigned long mpidr; + + mpidr = ((value >> 0) & 0xFF) << MPIDR_LEVEL_SHIFT(0); + mpidr |= ((value >> 8) & 0xFF) << MPIDR_LEVEL_SHIFT(1); + mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2); + mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3); + + return mpidr; +} + +/* + * Lookup the given MPIDR value to get the vcpu_id (if there is one) + * and store that in the irq_spi_cpu[] array. + * This limits the number of VCPUs to 255 for now, extending the data + * type (or storing kvm_vcpu pointers) should lift the limit. + * Store the original MPIDR value in an extra array to support read-as-written. + * Unallocated MPIDRs are translated to a special value and caught + * before any array accesses. + */ +static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + struct kvm *kvm = vcpu->kvm; + struct vgic_dist *dist = &kvm->arch.vgic; + int spi; + u32 reg; + int vcpu_id; + unsigned long *bmap, mpidr; + + /* + * The upper 32 bits of each 64 bit register are zero, + * as we don't support Aff3. + */ + if ((offset & 4)) { + vgic_reg_access(mmio, NULL, offset, + ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); + return false; + } + + /* This region only covers SPIs, so no handling of private IRQs here. */ + spi = offset / 8; + + /* get the stored MPIDR for this IRQ */ + mpidr = uncompress_mpidr(dist->irq_spi_mpidr[spi]); + reg = mpidr; + + vgic_reg_access(mmio, ®, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); + + if (!mmio->is_write) + return false; + + /* + * Now clear the currently assigned vCPU from the map, making room + * for the new one to be written below + */ + vcpu = kvm_mpidr_to_vcpu(kvm, mpidr); + if (likely(vcpu)) { + vcpu_id = vcpu->vcpu_id; + bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]); + __clear_bit(spi, bmap); + } + + dist->irq_spi_mpidr[spi] = compress_mpidr(reg); + vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK); + + /* + * The spec says that non-existent MPIDR values should not be + * forwarded to any existent (v)CPU, but should be able to become + * pending anyway. We simply keep the irq_spi_target[] array empty, so + * the interrupt will never be injected. + * irq_spi_cpu[irq] gets a magic value in this case. + */ + if (likely(vcpu)) { + vcpu_id = vcpu->vcpu_id; + dist->irq_spi_cpu[spi] = vcpu_id; + bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]); + __set_bit(spi, bmap); + } else { + dist->irq_spi_cpu[spi] = VCPU_NOT_ALLOCATED; + } + + vgic_update_state(kvm); + + return true; +} + +/* + * We should be careful about promising too much when a guest reads + * this register. Don't claim to be like any hardware implementation, + * but just report the GIC as version 3 - which is what a Linux guest + * would check. 
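+ *
+ * Concretely, the 0x3b returned for GICD_PIDR2 below carries
+ * ArchRev = 0x3 (a GICv3) in bits [7:4], which is the field a Linux
+ * guest actually tests.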
+ */ +static bool handle_mmio_idregs(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + u32 reg = 0; + + switch (offset + GICD_IDREGS) { + case GICD_PIDR2: + reg = 0x3b; + break; + } + + vgic_reg_access(mmio, ®, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); + + return false; +} + +static const struct vgic_io_range vgic_v3_dist_ranges[] = { + { + .base = GICD_CTLR, + .len = 0x04, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_ctlr, + }, + { + .base = GICD_TYPER, + .len = 0x04, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_typer, + }, + { + .base = GICD_IIDR, + .len = 0x04, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_iidr, + }, + { + /* this register is optional, it is RAZ/WI if not implemented */ + .base = GICD_STATUSR, + .len = 0x04, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_raz_wi, + }, + { + /* this write only register is WI when TYPER.MBIS=0 */ + .base = GICD_SETSPI_NSR, + .len = 0x04, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_raz_wi, + }, + { + /* this write only register is WI when TYPER.MBIS=0 */ + .base = GICD_CLRSPI_NSR, + .len = 0x04, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_raz_wi, + }, + { + /* this is RAZ/WI when DS=1 */ + .base = GICD_SETSPI_SR, + .len = 0x04, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_raz_wi, + }, + { + /* this is RAZ/WI when DS=1 */ + .base = GICD_CLRSPI_SR, + .len = 0x04, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_raz_wi, + }, + { + .base = GICD_IGROUPR, + .len = 0x80, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_rao_wi, + }, + { + .base = GICD_ISENABLER, + .len = 0x80, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_set_enable_reg_dist, + }, + { + .base = GICD_ICENABLER, + .len = 0x80, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_clear_enable_reg_dist, + }, + { + .base = GICD_ISPENDR, + .len = 0x80, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_set_pending_reg_dist, + }, + { + .base = GICD_ICPENDR, + .len = 0x80, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_clear_pending_reg_dist, + }, + { + .base = GICD_ISACTIVER, + .len = 0x80, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_raz_wi, + }, + { + .base = GICD_ICACTIVER, + .len = 0x80, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_raz_wi, + }, + { + .base = GICD_IPRIORITYR, + .len = 0x400, + .bits_per_irq = 8, + .handle_mmio = handle_mmio_priority_reg_dist, + }, + { + /* TARGETSRn is RES0 when ARE=1 */ + .base = GICD_ITARGETSR, + .len = 0x400, + .bits_per_irq = 8, + .handle_mmio = handle_mmio_raz_wi, + }, + { + .base = GICD_ICFGR, + .len = 0x100, + .bits_per_irq = 2, + .handle_mmio = handle_mmio_cfg_reg_dist, + }, + { + /* this is RAZ/WI when DS=1 */ + .base = GICD_IGRPMODR, + .len = 0x80, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_raz_wi, + }, + { + /* this is RAZ/WI when DS=1 */ + .base = GICD_NSACR, + .len = 0x100, + .bits_per_irq = 2, + .handle_mmio = handle_mmio_raz_wi, + }, + { + /* this is RAZ/WI when ARE=1 */ + .base = GICD_SGIR, + .len = 0x04, + .handle_mmio = handle_mmio_raz_wi, + }, + { + /* this is RAZ/WI when ARE=1 */ + .base = GICD_CPENDSGIR, + .len = 0x10, + .handle_mmio = handle_mmio_raz_wi, + }, + { + /* this is RAZ/WI when ARE=1 */ + .base = GICD_SPENDSGIR, + .len = 0x10, + .handle_mmio = handle_mmio_raz_wi, + }, + { + .base = GICD_IROUTER + 0x100, + .len = 0x1ee0, + .bits_per_irq = 64, + .handle_mmio = handle_mmio_route_reg, + }, + { + .base = GICD_IDREGS, + .len = 0x30, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_idregs, + }, + {}, +}; + +static bool 
handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + /* since we don't support LPIs, this register is zero for now */ + vgic_reg_access(mmio, NULL, offset, + ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); + return false; +} + +static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + u32 reg; + u64 mpidr; + struct kvm_vcpu *redist_vcpu = mmio->private; + int target_vcpu_id = redist_vcpu->vcpu_id; + + /* the upper 32 bits contain the affinity value */ + if ((offset & ~3) == 4) { + mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu); + reg = compress_mpidr(mpidr); + + vgic_reg_access(mmio, ®, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); + return false; + } + + reg = redist_vcpu->vcpu_id << 8; + if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1) + reg |= GICR_TYPER_LAST; + vgic_reg_access(mmio, ®, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); + return false; +} + +static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + struct kvm_vcpu *redist_vcpu = mmio->private; + + return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, + redist_vcpu->vcpu_id, + ACCESS_WRITE_SETBIT); +} + +static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + struct kvm_vcpu *redist_vcpu = mmio->private; + + return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, + redist_vcpu->vcpu_id, + ACCESS_WRITE_CLEARBIT); +} + +static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + struct kvm_vcpu *redist_vcpu = mmio->private; + + return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset, + redist_vcpu->vcpu_id); +} + +static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + struct kvm_vcpu *redist_vcpu = mmio->private; + + return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset, + redist_vcpu->vcpu_id); +} + +static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + struct kvm_vcpu *redist_vcpu = mmio->private; + u32 *reg; + + reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority, + redist_vcpu->vcpu_id, offset); + vgic_reg_access(mmio, reg, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); + return false; +} + +static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + struct kvm_vcpu *redist_vcpu = mmio->private; + + u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg, + redist_vcpu->vcpu_id, offset >> 1); + + return vgic_handle_cfg_reg(reg, mmio, offset); +} + +#define SGI_base(x) ((x) + SZ_64K) + +static const struct vgic_io_range vgic_redist_ranges[] = { + { + .base = GICR_CTLR, + .len = 0x04, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_ctlr_redist, + }, + { + .base = GICR_TYPER, + .len = 0x08, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_typer_redist, + }, + { + .base = GICR_IIDR, + .len = 0x04, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_iidr, + }, + { + .base = GICR_WAKER, + .len = 0x04, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_raz_wi, + }, + { + .base = GICR_IDREGS, + .len = 0x30, + .bits_per_irq = 0, + .handle_mmio = handle_mmio_idregs, + }, + { + .base = SGI_base(GICR_IGROUPR0), + .len = 0x04, + .bits_per_irq = 1, + .handle_mmio = 
handle_mmio_rao_wi, + }, + { + .base = SGI_base(GICR_ISENABLER0), + .len = 0x04, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_set_enable_reg_redist, + }, + { + .base = SGI_base(GICR_ICENABLER0), + .len = 0x04, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_clear_enable_reg_redist, + }, + { + .base = SGI_base(GICR_ISPENDR0), + .len = 0x04, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_set_pending_reg_redist, + }, + { + .base = SGI_base(GICR_ICPENDR0), + .len = 0x04, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_clear_pending_reg_redist, + }, + { + .base = SGI_base(GICR_ISACTIVER0), + .len = 0x04, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_raz_wi, + }, + { + .base = SGI_base(GICR_ICACTIVER0), + .len = 0x04, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_raz_wi, + }, + { + .base = SGI_base(GICR_IPRIORITYR0), + .len = 0x20, + .bits_per_irq = 8, + .handle_mmio = handle_mmio_priority_reg_redist, + }, + { + .base = SGI_base(GICR_ICFGR0), + .len = 0x08, + .bits_per_irq = 2, + .handle_mmio = handle_mmio_cfg_reg_redist, + }, + { + .base = SGI_base(GICR_IGRPMODR0), + .len = 0x04, + .bits_per_irq = 1, + .handle_mmio = handle_mmio_raz_wi, + }, + { + .base = SGI_base(GICR_NSACR), + .len = 0x04, + .handle_mmio = handle_mmio_raz_wi, + }, + {}, +}; + +static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq) +{ + if (vgic_queue_irq(vcpu, 0, irq)) { + vgic_dist_irq_clear_pending(vcpu, irq); + vgic_cpu_irq_clear(vcpu, irq); + return true; + } + + return false; +} + +static int vgic_v3_map_resources(struct kvm *kvm, + const struct vgic_params *params) +{ + int ret = 0; + struct vgic_dist *dist = &kvm->arch.vgic; + gpa_t rdbase = dist->vgic_redist_base; + struct vgic_io_device *iodevs = NULL; + int i; + + if (!irqchip_in_kernel(kvm)) + return 0; + + mutex_lock(&kvm->lock); + + if (vgic_ready(kvm)) + goto out; + + if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) || + IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) { + kvm_err("Need to set vgic distributor addresses first\n"); + ret = -ENXIO; + goto out; + } + + /* + * For a VGICv3 we require the userland to explicitly initialize + * the VGIC before we need to use it. 
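+ *
+ * Sketch of that userland step (device fd handling assumed):
+ *
+ *	struct kvm_device_attr init = {
+ *		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
+ *		.attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
+ *	};
+ *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &init);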
+ */ + if (!vgic_initialized(kvm)) { + ret = -EBUSY; + goto out; + } + + ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base, + GIC_V3_DIST_SIZE, vgic_v3_dist_ranges, + -1, &dist->dist_iodev); + if (ret) + goto out; + + iodevs = kcalloc(dist->nr_cpus, sizeof(iodevs[0]), GFP_KERNEL); + if (!iodevs) { + ret = -ENOMEM; + goto out_unregister; + } + + for (i = 0; i < dist->nr_cpus; i++) { + ret = vgic_register_kvm_io_dev(kvm, rdbase, + SZ_128K, vgic_redist_ranges, + i, &iodevs[i]); + if (ret) + goto out_unregister; + rdbase += GIC_V3_REDIST_SIZE; + } + + dist->redist_iodevs = iodevs; + dist->ready = true; + goto out; + +out_unregister: + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev); + if (iodevs) { + for (i = 0; i < dist->nr_cpus; i++) { + if (iodevs[i].dev.ops) + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, + &iodevs[i].dev); + } + } + +out: + if (ret) + kvm_vgic_destroy(kvm); + mutex_unlock(&kvm->lock); + return ret; +} + +static int vgic_v3_init_model(struct kvm *kvm) +{ + int i; + u32 mpidr; + struct vgic_dist *dist = &kvm->arch.vgic; + int nr_spis = dist->nr_irqs - VGIC_NR_PRIVATE_IRQS; + + dist->irq_spi_mpidr = kcalloc(nr_spis, sizeof(dist->irq_spi_mpidr[0]), + GFP_KERNEL); + + if (!dist->irq_spi_mpidr) + return -ENOMEM; + + /* Initialize the target VCPUs for each IRQ to VCPU 0 */ + mpidr = compress_mpidr(kvm_vcpu_get_mpidr_aff(kvm_get_vcpu(kvm, 0))); + for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) { + dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0; + dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr; + vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1); + } + + return 0; +} + +/* GICv3 does not keep track of SGI sources anymore. */ +static void vgic_v3_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source) +{ +} + +void vgic_v3_init_emulation(struct kvm *kvm) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + + dist->vm_ops.queue_sgi = vgic_v3_queue_sgi; + dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source; + dist->vm_ops.init_model = vgic_v3_init_model; + dist->vm_ops.map_resources = vgic_v3_map_resources; + + kvm->arch.max_vcpus = KVM_MAX_VCPUS; +} + +/* + * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI + * generation register ICC_SGI1R_EL1) with a given VCPU. + * If the VCPU's MPIDR matches, return the level0 affinity, otherwise + * return -1. + */ +static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu) +{ + unsigned long affinity; + int level0; + + /* + * Split the current VCPU's MPIDR into affinity level 0 and the + * rest as this is what we have to compare against. + */ + affinity = kvm_vcpu_get_mpidr_aff(vcpu); + level0 = MPIDR_AFFINITY_LEVEL(affinity, 0); + affinity &= ~MPIDR_LEVEL_MASK; + + /* bail out if the upper three levels don't match */ + if (sgi_aff != affinity) + return -1; + + /* Is this VCPU's bit set in the mask ? */ + if (!(sgi_cpu_mask & BIT(level0))) + return -1; + + return level0; +} + +#define SGI_AFFINITY_LEVEL(reg, level) \ + ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \ + >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level)) + +/** + * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs + * @vcpu: The VCPU requesting a SGI + * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU + * + * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register. + * This will trap in sys_regs.c and call this function. 
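+ * As a worked example of the encoding described next (the value is
+ * assumed): reg = 0x02000005 encodes SGI ID 2 in bits [27:24], zero
+ * Aff3/Aff2/Aff1, IRM (bit 40) clear, and a target list of 0x0005,
+ * i.e. the CPUs with Aff0 = 0 and Aff0 = 2 in cluster 0.0.0.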
+ * The ICC_SGI1R_EL1 register contains the upper three affinity levels of the + * target processors as well as a bitmask of 16 Aff0 CPUs. + * If the interrupt routing mode bit is not set, we iterate over all VCPUs to + * check for matching ones. If this bit is set, we signal all, but not the + * calling VCPU. + */ +void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu *c_vcpu; + struct vgic_dist *dist = &kvm->arch.vgic; + u16 target_cpus; + u64 mpidr; + int sgi, c; + int vcpu_id = vcpu->vcpu_id; + bool broadcast; + int updated = 0; + + sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT; + broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT); + target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT; + mpidr = SGI_AFFINITY_LEVEL(reg, 3); + mpidr |= SGI_AFFINITY_LEVEL(reg, 2); + mpidr |= SGI_AFFINITY_LEVEL(reg, 1); + + /* + * We take the dist lock here, because we come from the sysregs + * code path and not from the MMIO one (which already takes the lock). + */ + spin_lock(&dist->lock); + + /* + * We iterate over all VCPUs to find the MPIDRs matching the request. + * If we have handled one CPU, we clear its bit to detect early + * whether we are already finished. This avoids iterating through all + * VCPUs when most of the time we just signal a single VCPU. + */ + kvm_for_each_vcpu(c, c_vcpu, kvm) { + + /* Exit early if we have dealt with all requested CPUs */ + if (!broadcast && target_cpus == 0) + break; + + /* Don't signal the calling VCPU */ + if (broadcast && c == vcpu_id) + continue; + + if (!broadcast) { + int level0; + + level0 = match_mpidr(mpidr, target_cpus, c_vcpu); + if (level0 == -1) + continue; + + /* remove this matching VCPU from the mask */ + target_cpus &= ~BIT(level0); + } + + /* Flag the SGI as pending */ + vgic_dist_irq_set_pending(c_vcpu, sgi); + updated = 1; + kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c); + } + if (updated) + vgic_update_state(vcpu->kvm); + spin_unlock(&dist->lock); + if (updated) + vgic_kick_vcpus(vcpu->kvm); +} + +static int vgic_v3_create(struct kvm_device *dev, u32 type) +{ + return kvm_vgic_create(dev->kvm, type); +} + +static void vgic_v3_destroy(struct kvm_device *dev) +{ + kfree(dev); +} + +static int vgic_v3_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + int ret; + + ret = vgic_set_common_attr(dev, attr); + if (ret != -ENXIO) + return ret; + + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: + return -ENXIO; + } + + return -ENXIO; +} + +static int vgic_v3_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + int ret; + + ret = vgic_get_common_attr(dev, attr); + if (ret != -ENXIO) + return ret; + + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: + return -ENXIO; + } + + return -ENXIO; +} + +static int vgic_v3_has_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_ADDR: + switch (attr->attr) { + case KVM_VGIC_V2_ADDR_TYPE_DIST: + case KVM_VGIC_V2_ADDR_TYPE_CPU: + return -ENXIO; + case KVM_VGIC_V3_ADDR_TYPE_DIST: + case KVM_VGIC_V3_ADDR_TYPE_REDIST: + return 0; + } + break; + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: + return -ENXIO; + case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: + return 0; + case KVM_DEV_ARM_VGIC_GRP_CTRL: + switch (attr->attr) { + case KVM_DEV_ARM_VGIC_CTRL_INIT: + return 0; + } + } + return
-ENXIO; +} + +struct kvm_device_ops kvm_arm_vgic_v3_ops = { + .name = "kvm-arm-vgic-v3", + .create = vgic_v3_create, + .destroy = vgic_v3_destroy, + .set_attr = vgic_v3_set_attr, + .get_attr = vgic_v3_get_attr, + .has_attr = vgic_v3_has_attr, +}; diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c new file mode 100644 index 000000000..dff06021e --- /dev/null +++ b/virt/kvm/arm/vgic-v3.c @@ -0,0 +1,287 @@ +/* + * Copyright (C) 2013 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/cpu.h> +#include <linux/kvm.h> +#include <linux/kvm_host.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#include <linux/irqchip/arm-gic-v3.h> + +#include <asm/kvm_emulate.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_mmu.h> + +/* These are for GICv2 emulation only */ +#define GICH_LR_VIRTUALID (0x3ffUL << 0) +#define GICH_LR_PHYSID_CPUID_SHIFT (10) +#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) +#define ICH_LR_VIRTUALID_MASK (BIT_ULL(32) - 1) + +/* + * LRs are stored in reverse order in memory. Make sure we index them + * correctly. + */ +#define LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) + +static u32 ich_vtr_el2; + +static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr) +{ + struct vgic_lr lr_desc; + u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)]; + + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) + lr_desc.irq = val & ICH_LR_VIRTUALID_MASK; + else + lr_desc.irq = val & GICH_LR_VIRTUALID; + + lr_desc.source = 0; + if (lr_desc.irq <= 15 && + vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) + lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7; + + lr_desc.state = 0; + + if (val & ICH_LR_PENDING_BIT) + lr_desc.state |= LR_STATE_PENDING; + if (val & ICH_LR_ACTIVE_BIT) + lr_desc.state |= LR_STATE_ACTIVE; + if (val & ICH_LR_EOI) + lr_desc.state |= LR_EOI_INT; + + return lr_desc; +} + +static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr, + struct vgic_lr lr_desc) +{ + u64 lr_val; + + lr_val = lr_desc.irq; + + /* + * Currently all guest IRQs are Group1, as Group0 would result + * in a FIQ in the guest, which it wouldn't expect. + * Eventually we want to make this configurable, so we may revisit + * this in the future.
+ */ + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) + lr_val |= ICH_LR_GROUP; + else + lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT; + + if (lr_desc.state & LR_STATE_PENDING) + lr_val |= ICH_LR_PENDING_BIT; + if (lr_desc.state & LR_STATE_ACTIVE) + lr_val |= ICH_LR_ACTIVE_BIT; + if (lr_desc.state & LR_EOI_INT) + lr_val |= ICH_LR_EOI; + + vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val; +} + +static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, + struct vgic_lr lr_desc) +{ + if (!(lr_desc.state & LR_STATE_MASK)) + vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); + else + vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr); +} + +static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu) +{ + return vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr; +} + +static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu) +{ + return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr; +} + +static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0; +} + +static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu) +{ + u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr; + u32 ret = 0; + + if (misr & ICH_MISR_EOI) + ret |= INT_STATUS_EOI; + if (misr & ICH_MISR_U) + ret |= INT_STATUS_UNDERFLOW; + + return ret; +} + +static void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) +{ + u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr; + + vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT; + vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; + vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; + vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; +} + +static void vgic_v3_enable_underflow(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= ICH_HCR_UIE; +} + +static void vgic_v3_disable_underflow(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr &= ~ICH_HCR_UIE; +} + +static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) +{ + u32 vmcr; + + vmcr = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK; + vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK; + vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK; + vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK; + + vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr; +} + +static void vgic_v3_enable(struct kvm_vcpu *vcpu) +{ + struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3; + + /* + * By forcing VMCR to zero, the GIC will restore the binary + * points to their reset values. Anything else resets to zero + * anyway. + */ + vgic_v3->vgic_vmcr = 0; + + /* + * If we are emulating a GICv3, we do it in a non-GICv2-compatible + * way, so we force SRE to 1 to demonstrate this to the guest. + * This goes with the spec allowing the value to be RAO/WI. + */ + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) + vgic_v3->vgic_sre = ICC_SRE_EL1_SRE; + else + vgic_v3->vgic_sre = 0; + + /* Get the show on the road...
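+ * (that is: set the global enable bit in ICH_HCR_EL2 so the virtual + * CPU interface starts delivering virtual interrupts to the guest)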
*/ + vgic_v3->vgic_hcr = ICH_HCR_EN; +} + +static const struct vgic_ops vgic_v3_ops = { + .get_lr = vgic_v3_get_lr, + .set_lr = vgic_v3_set_lr, + .sync_lr_elrsr = vgic_v3_sync_lr_elrsr, + .get_elrsr = vgic_v3_get_elrsr, + .get_eisr = vgic_v3_get_eisr, + .clear_eisr = vgic_v3_clear_eisr, + .get_interrupt_status = vgic_v3_get_interrupt_status, + .enable_underflow = vgic_v3_enable_underflow, + .disable_underflow = vgic_v3_disable_underflow, + .get_vmcr = vgic_v3_get_vmcr, + .set_vmcr = vgic_v3_set_vmcr, + .enable = vgic_v3_enable, +}; + +static struct vgic_params vgic_v3_params; + +/** + * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT + * @node: pointer to the DT node + * @ops: address of a pointer to the GICv3 operations + * @params: address of a pointer to HW-specific parameters + * + * Returns 0 if a GICv3 has been found, with the low level operations + * in *ops and the HW parameters in *params. Returns an error code + * otherwise. + */ +int vgic_v3_probe(struct device_node *vgic_node, + const struct vgic_ops **ops, + const struct vgic_params **params) +{ + int ret = 0; + u32 gicv_idx; + struct resource vcpu_res; + struct vgic_params *vgic = &vgic_v3_params; + + vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0); + if (!vgic->maint_irq) { + kvm_err("error getting vgic maintenance irq from DT\n"); + ret = -ENXIO; + goto out; + } + + ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2); + + /* + * The ListRegs field is 5 bits, but there is an architectural + * maximum of 16 list registers. Just ignore bit 4... + */ + vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1; + vgic->can_emulate_gicv2 = false; + + if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx)) + gicv_idx = 1; + + gicv_idx += 3; /* Also skip GICD, GICC, GICH */ + if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) { + kvm_info("GICv3: no GICV resource entry\n"); + vgic->vcpu_base = 0; + } else if (!PAGE_ALIGNED(vcpu_res.start)) { + pr_warn("GICV physical address 0x%llx not page aligned\n", + (unsigned long long)vcpu_res.start); + vgic->vcpu_base = 0; + } else if (!PAGE_ALIGNED(resource_size(&vcpu_res))) { + pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n", + (unsigned long long)resource_size(&vcpu_res), + PAGE_SIZE); + vgic->vcpu_base = 0; + } else { + vgic->vcpu_base = vcpu_res.start; + vgic->can_emulate_gicv2 = true; + kvm_register_device_ops(&kvm_arm_vgic_v2_ops, + KVM_DEV_TYPE_ARM_VGIC_V2); + } + if (vgic->vcpu_base == 0) + kvm_info("disabling GICv2 emulation\n"); + kvm_register_device_ops(&kvm_arm_vgic_v3_ops, KVM_DEV_TYPE_ARM_VGIC_V3); + + vgic->vctrl_base = NULL; + vgic->type = VGIC_V3; + vgic->max_gic_vcpus = KVM_MAX_VCPUS; + + kvm_info("%s@%llx IRQ%d\n", vgic_node->name, + vcpu_res.start, vgic->maint_irq); + + *ops = &vgic_v3_ops; + *params = vgic; + +out: + of_node_put(vgic_node); + return ret; +} diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c new file mode 100644 index 000000000..950064a09 --- /dev/null +++ b/virt/kvm/arm/vgic.c @@ -0,0 +1,2173 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/cpu.h> +#include <linux/kvm.h> +#include <linux/kvm_host.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/uaccess.h> + +#include <linux/irqchip/arm-gic.h> + +#include <asm/kvm_emulate.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_mmu.h> +#include <trace/events/kvm.h> +#include <asm/kvm.h> +#include <kvm/iodev.h> + +/* + * How the whole thing works (courtesy of Christoffer Dall): + * + * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if + * something is pending on the CPU interface. + * - Interrupts that are pending on the distributor are stored on the + * vgic.irq_pending vgic bitmap (this bitmap is updated by both user land + * ioctls and guest mmio ops, and other in-kernel peripherals such as the + * arch. timers). + * - Every time the bitmap changes, the irq_pending_on_cpu oracle is + * recalculated + * - To calculate the oracle, we need info for each cpu from + * compute_pending_for_cpu, which considers: + * - PPI: dist->irq_pending & dist->irq_enable + * - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target + * - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn + * registers, stored on each vcpu. We only keep one bit of + * information per interrupt, making sure that only one vcpu can + * accept the interrupt. + * - If any of the above state changes, we must recalculate the oracle. + * - The same is true when injecting an interrupt, except that we only + * consider a single interrupt at a time. The irq_spi_cpu array + * contains the target CPU for each SPI. + * + * The handling of level interrupts adds some extra complexity. We + * need to track when the interrupt has been EOIed, so we can sample + * the 'line' again. This is achieved as such: + * + * - When a level interrupt is moved onto a vcpu, the corresponding + * bit in irq_queued is set. As long as this bit is set, the line + * will be ignored for further interrupts. The interrupt is injected + * into the vcpu with the GICH_LR_EOI bit set (generate a + * maintenance interrupt on EOI). + * - When the interrupt is EOIed, the maintenance interrupt fires, + * and clears the corresponding bit in irq_queued. This allows the + * interrupt line to be sampled again. + * - Note that level-triggered interrupts can also be set to pending from + * writes to GICD_ISPENDRn and lowering the external input line does not + * cause the interrupt to become inactive in such a situation. + * Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become + * inactive as long as the external input line is held high. 
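+ * + * As a worked example of those rules: a device asserting a level + * interrupt sets the irq_pending bit and the oracle is recalculated; + * once the interrupt is queued onto a vcpu, irq_queued is set and the + * line is ignored until the guest EOIs the interrupt, at which point + * the maintenance interrupt clears irq_queued and the line is sampled + * again.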
+ */ + +#include "vgic.h" + +static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); +static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu); +static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); +static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); + +static const struct vgic_ops *vgic_ops; +static const struct vgic_params *vgic; + +static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source) +{ + vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source); +} + +static bool queue_sgi(struct kvm_vcpu *vcpu, int irq) +{ + return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq); +} + +int kvm_vgic_map_resources(struct kvm *kvm) +{ + return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic); +} + +/* + * struct vgic_bitmap contains a bitmap made of unsigned longs, but + * extracts u32s out of them. + * + * This does not work on 64-bit BE systems, because the bitmap access + * will store two consecutive 32-bit words with the higher-addressed + * register's bits at the lower index and the lower-addressed register's + * bits at the higher index. + * + * Therefore, swizzle the register index when accessing the 32-bit word + * registers to access the right register's value. + */ +#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64 +#define REG_OFFSET_SWIZZLE 1 +#else +#define REG_OFFSET_SWIZZLE 0 +#endif + +static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs) +{ + int nr_longs; + + nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS); + + b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL); + if (!b->private) + return -ENOMEM; + + b->shared = b->private + nr_cpus; + + return 0; +} + +static void vgic_free_bitmap(struct vgic_bitmap *b) +{ + kfree(b->private); + b->private = NULL; + b->shared = NULL; +} + +/* + * Call this function to convert a u64 value to an unsigned long * bitmask + * in a way that works on both 32-bit and 64-bit LE and BE platforms. + * + * Warning: Calling this function may modify *val. 
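+ * + * Typical use, mirroring the sync path further down (sketch only): + * + *	u64 elrsr = vgic_get_elrsr(vcpu); + *	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr); + *	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) + *		...;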
+ */ +static unsigned long *u64_to_bitmask(u64 *val) +{ +#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32 + *val = (*val >> 32) | (*val << 32); +#endif + return (unsigned long *)val; +} + +u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset) +{ + offset >>= 2; + if (!offset) + return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE; + else + return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE); +} + +static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x, + int cpuid, int irq) +{ + if (irq < VGIC_NR_PRIVATE_IRQS) + return test_bit(irq, x->private + cpuid); + + return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared); +} + +void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, + int irq, int val) +{ + unsigned long *reg; + + if (irq < VGIC_NR_PRIVATE_IRQS) { + reg = x->private + cpuid; + } else { + reg = x->shared; + irq -= VGIC_NR_PRIVATE_IRQS; + } + + if (val) + set_bit(irq, reg); + else + clear_bit(irq, reg); +} + +static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid) +{ + return x->private + cpuid; +} + +unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x) +{ + return x->shared; +} + +static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs) +{ + int size; + + size = nr_cpus * VGIC_NR_PRIVATE_IRQS; + size += nr_irqs - VGIC_NR_PRIVATE_IRQS; + + x->private = kzalloc(size, GFP_KERNEL); + if (!x->private) + return -ENOMEM; + + x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32); + return 0; +} + +static void vgic_free_bytemap(struct vgic_bytemap *b) +{ + kfree(b->private); + b->private = NULL; + b->shared = NULL; +} + +u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset) +{ + u32 *reg; + + if (offset < VGIC_NR_PRIVATE_IRQS) { + reg = x->private; + offset += cpuid * VGIC_NR_PRIVATE_IRQS; + } else { + reg = x->shared; + offset -= VGIC_NR_PRIVATE_IRQS; + } + + return reg + (offset / sizeof(u32)); +} + +#define VGIC_CFG_LEVEL 0 +#define VGIC_CFG_EDGE 1 + +static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + int irq_val; + + irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq); + return irq_val == VGIC_CFG_EDGE; +} + +static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq); +} + +static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq); +} + +static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq); +} + +static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1); +} + +static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0); +} + +static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1); +} + +static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = 
&vcpu->kvm->arch.vgic; + + vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0); +} + +static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq); +} + +static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1); +} + +static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0); +} + +static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq); +} + +static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0); +} + +static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq); +} + +void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1); +} + +void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0); +} + +static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq) +{ + if (irq < VGIC_NR_PRIVATE_IRQS) + set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu); + else + set_bit(irq - VGIC_NR_PRIVATE_IRQS, + vcpu->arch.vgic_cpu.pending_shared); +} + +void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq) +{ + if (irq < VGIC_NR_PRIVATE_IRQS) + clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu); + else + clear_bit(irq - VGIC_NR_PRIVATE_IRQS, + vcpu->arch.vgic_cpu.pending_shared); +} + +static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq) +{ + return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq); +} + +/** + * vgic_reg_access - access vgic register + * @mmio: pointer to the data describing the mmio access + * @reg: pointer to the virtual backing of vgic distributor data + * @offset: least significant 2 bits used for word offset + * @mode: ACCESS_ mode (see defines above) + * + * Helper to make vgic register access easier using one of the access + * modes defined for vgic register access + * (read,raz,write-ignored,setbit,clearbit,write) + */ +void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, + phys_addr_t offset, int mode) +{ + int word_offset = (offset & 3) * 8; + u32 mask = (1UL << (mmio->len * 8)) - 1; + u32 regval; + + /* + * Any alignment fault should have been delivered to the guest + * directly (ARM ARM B3.12.7 "Prioritization of aborts"). 
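+ * + * As an illustration of the maths above: a 1-byte access whose low two + * offset bits are 3 yields word_offset == 24 and mask == 0xff, so the + * byte is merged into bits [31:24] of the backing 32-bit register.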
+ */ + + if (reg) { + regval = *reg; + } else { + BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED)); + regval = 0; + } + + if (mmio->is_write) { + u32 data = mmio_data_read(mmio, mask) << word_offset; + switch (ACCESS_WRITE_MASK(mode)) { + case ACCESS_WRITE_IGNORED: + return; + + case ACCESS_WRITE_SETBIT: + regval |= data; + break; + + case ACCESS_WRITE_CLEARBIT: + regval &= ~data; + break; + + case ACCESS_WRITE_VALUE: + regval = (regval & ~(mask << word_offset)) | data; + break; + } + *reg = regval; + } else { + switch (ACCESS_READ_MASK(mode)) { + case ACCESS_READ_RAZ: + regval = 0; + /* fall through */ + + case ACCESS_READ_VALUE: + mmio_data_write(mmio, mask, regval >> word_offset); + } + } +} + +bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + vgic_reg_access(mmio, NULL, offset, + ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); + return false; +} + +bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, + phys_addr_t offset, int vcpu_id, int access) +{ + u32 *reg; + int mode = ACCESS_READ_VALUE | access; + struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id); + + reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset); + vgic_reg_access(mmio, reg, offset, mode); + if (mmio->is_write) { + if (access & ACCESS_WRITE_CLEARBIT) { + if (offset < 4) /* Force SGI enabled */ + *reg |= 0xffff; + vgic_retire_disabled_irqs(target_vcpu); + } + vgic_update_state(kvm); + return true; + } + + return false; +} + +bool vgic_handle_set_pending_reg(struct kvm *kvm, + struct kvm_exit_mmio *mmio, + phys_addr_t offset, int vcpu_id) +{ + u32 *reg, orig; + u32 level_mask; + int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT; + struct vgic_dist *dist = &kvm->arch.vgic; + + reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset); + level_mask = (~(*reg)); + + /* Mark both level and edge triggered irqs as pending */ + reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset); + orig = *reg; + vgic_reg_access(mmio, reg, offset, mode); + + if (mmio->is_write) { + /* Set the soft-pending flag only for level-triggered irqs */ + reg = vgic_bitmap_get_reg(&dist->irq_soft_pend, + vcpu_id, offset); + vgic_reg_access(mmio, reg, offset, mode); + *reg &= level_mask; + + /* Ignore writes to SGIs */ + if (offset < 2) { + *reg &= ~0xffff; + *reg |= orig & 0xffff; + } + + vgic_update_state(kvm); + return true; + } + + return false; +} + +bool vgic_handle_clear_pending_reg(struct kvm *kvm, + struct kvm_exit_mmio *mmio, + phys_addr_t offset, int vcpu_id) +{ + u32 *level_active; + u32 *reg, orig; + int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT; + struct vgic_dist *dist = &kvm->arch.vgic; + + reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset); + orig = *reg; + vgic_reg_access(mmio, reg, offset, mode); + if (mmio->is_write) { + /* Re-set level triggered level-active interrupts */ + level_active = vgic_bitmap_get_reg(&dist->irq_level, + vcpu_id, offset); + reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset); + *reg |= *level_active; + + /* Ignore writes to SGIs */ + if (offset < 2) { + *reg &= ~0xffff; + *reg |= orig & 0xffff; + } + + /* Clear soft-pending flags */ + reg = vgic_bitmap_get_reg(&dist->irq_soft_pend, + vcpu_id, offset); + vgic_reg_access(mmio, reg, offset, mode); + + vgic_update_state(kvm); + return true; + } + return false; +} + +bool vgic_handle_set_active_reg(struct kvm *kvm, + struct kvm_exit_mmio *mmio, + phys_addr_t offset, int vcpu_id) +{ + u32 *reg; + struct vgic_dist *dist = 
&kvm->arch.vgic; + + reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset); + vgic_reg_access(mmio, reg, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT); + + if (mmio->is_write) { + vgic_update_state(kvm); + return true; + } + + return false; +} + +bool vgic_handle_clear_active_reg(struct kvm *kvm, + struct kvm_exit_mmio *mmio, + phys_addr_t offset, int vcpu_id) +{ + u32 *reg; + struct vgic_dist *dist = &kvm->arch.vgic; + + reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset); + vgic_reg_access(mmio, reg, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); + + if (mmio->is_write) { + vgic_update_state(kvm); + return true; + } + + return false; +} + +static u32 vgic_cfg_expand(u16 val) +{ + u32 res = 0; + int i; + + /* + * Turn a 16bit value like abcd...mnop into a 32bit word + * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is. + */ + for (i = 0; i < 16; i++) + res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1); + + return res; +} + +static u16 vgic_cfg_compress(u32 val) +{ + u16 res = 0; + int i; + + /* + * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like + * abcd...mnop which is what we really care about. + */ + for (i = 0; i < 16; i++) + res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i; + + return res; +} + +/* + * The distributor uses 2 bits per IRQ for the CFG register, but the + * LSB is always 0. As such, we only keep the upper bit, and use the + * two above functions to compress/expand the bits + */ +bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio, + phys_addr_t offset) +{ + u32 val; + + if (offset & 4) + val = *reg >> 16; + else + val = *reg & 0xffff; + + val = vgic_cfg_expand(val); + vgic_reg_access(mmio, &val, offset, + ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); + if (mmio->is_write) { + if (offset < 8) { + *reg = ~0U; /* Force PPIs/SGIs to 1 */ + return false; + } + + val = vgic_cfg_compress(val); + if (offset & 4) { + *reg &= 0xffff; + *reg |= val << 16; + } else { + *reg &= 0xffff << 16; + *reg |= val; + } + } + + return false; +} + +/** + * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor + * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs + * + * Move any IRQs that have already been assigned to LRs back to the + * emulated distributor state so that the complete emulated state can be read + * from the main emulation structures without investigating the LRs. + */ +void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + int i; + + for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) { + struct vgic_lr lr = vgic_get_lr(vcpu, i); + + /* + * There are three options for the state bits: + * + * 01: pending + * 10: active + * 11: pending and active + */ + BUG_ON(!(lr.state & LR_STATE_MASK)); + + /* Reestablish SGI source for pending and active IRQs */ + if (lr.irq < VGIC_NR_SGIS) + add_sgi_source(vcpu, lr.irq, lr.source); + + /* + * If the LR holds an active (10) or a pending and active (11) + * interrupt then move the active state to the + * distributor tracking bit. + */ + if (lr.state & LR_STATE_ACTIVE) { + vgic_irq_set_active(vcpu, lr.irq); + lr.state &= ~LR_STATE_ACTIVE; + } + + /* + * Reestablish the pending state on the distributor and the + * CPU interface. It may have already been pending, but that + * is fine, then we are only setting a few bits that were + * already set. 
+ */ + if (lr.state & LR_STATE_PENDING) { + vgic_dist_irq_set_pending(vcpu, lr.irq); + lr.state &= ~LR_STATE_PENDING; + } + + vgic_set_lr(vcpu, i, lr); + + /* + * Mark the LR as free for other use. + */ + BUG_ON(lr.state & LR_STATE_MASK); + vgic_retire_lr(i, lr.irq, vcpu); + vgic_irq_clear_queued(vcpu, lr.irq); + + /* Finally update the VGIC state. */ + vgic_update_state(vcpu->kvm); + } +} + +const +struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges, + int len, gpa_t offset) +{ + while (ranges->len) { + if (offset >= ranges->base && + (offset + len) <= (ranges->base + ranges->len)) + return ranges; + ranges++; + } + + return NULL; +} + +static bool vgic_validate_access(const struct vgic_dist *dist, + const struct vgic_io_range *range, + unsigned long offset) +{ + int irq; + + if (!range->bits_per_irq) + return true; /* Not an irq-based access */ + + irq = offset * 8 / range->bits_per_irq; + if (irq >= dist->nr_irqs) + return false; + + return true; +} + +/* + * Call the respective handler function for the given range. + * We split up any 64 bit accesses into two consecutive 32 bit + * handler calls and merge the result afterwards. + * We do this in a little endian fashion regardless of the host's + * or guest's endianness, because the GIC is always LE and the rest of + * the code (vgic_reg_access) also puts it in a LE fashion already. + * At this point we have already identified the handle function, so + * range points to that one entry and offset is relative to this. + */ +static bool call_range_handler(struct kvm_vcpu *vcpu, + struct kvm_exit_mmio *mmio, + unsigned long offset, + const struct vgic_io_range *range) +{ + struct kvm_exit_mmio mmio32; + bool ret; + + if (likely(mmio->len <= 4)) + return range->handle_mmio(vcpu, mmio, offset); + + /* + * Any access bigger than 4 bytes (that we currently handle in KVM) + * is actually 8 bytes long, caused by a 64-bit access + */ + + mmio32.len = 4; + mmio32.is_write = mmio->is_write; + mmio32.private = mmio->private; + + mmio32.phys_addr = mmio->phys_addr + 4; + mmio32.data = &((u32 *)mmio->data)[1]; + ret = range->handle_mmio(vcpu, &mmio32, offset + 4); + + mmio32.phys_addr = mmio->phys_addr; + mmio32.data = &((u32 *)mmio->data)[0]; + ret |= range->handle_mmio(vcpu, &mmio32, offset); + + return ret; +} + +/** + * vgic_handle_mmio_access - handle an in-kernel MMIO access + * This is called by the read/write KVM IO device wrappers below. 
+ * @vcpu: pointer to the vcpu performing the access + * @this: pointer to the KVM IO device in charge + * @addr: guest physical address of the access + * @len: size of the access + * @val: pointer to the data region + * @is_write: read or write access + * + * Returns 0 if the MMIO access could be performed, a negative error code + * otherwise + */ +static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu, + struct kvm_io_device *this, gpa_t addr, + int len, void *val, bool is_write) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + struct vgic_io_device *iodev = container_of(this, + struct vgic_io_device, dev); + struct kvm_run *run = vcpu->run; + const struct vgic_io_range *range; + struct kvm_exit_mmio mmio; + bool updated_state; + gpa_t offset; + + offset = addr - iodev->addr; + range = vgic_find_range(iodev->reg_ranges, len, offset); + if (unlikely(!range || !range->handle_mmio)) { + pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len); + return -ENXIO; + } + + mmio.phys_addr = addr; + mmio.len = len; + mmio.is_write = is_write; + mmio.data = val; + mmio.private = iodev->redist_vcpu; + + spin_lock(&dist->lock); + offset -= range->base; + if (vgic_validate_access(dist, range, offset)) { + updated_state = call_range_handler(vcpu, &mmio, offset, range); + } else { + if (!is_write) + memset(val, 0, len); + updated_state = false; + } + spin_unlock(&dist->lock); + run->mmio.is_write = is_write; + run->mmio.len = len; + run->mmio.phys_addr = addr; + memcpy(run->mmio.data, val, len); + + kvm_handle_mmio_return(vcpu, run); + + if (updated_state) + vgic_kick_vcpus(vcpu->kvm); + + return 0; +} + +static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu, + struct kvm_io_device *this, + gpa_t addr, int len, void *val) +{ + return vgic_handle_mmio_access(vcpu, this, addr, len, val, false); +} + +static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *this, + gpa_t addr, int len, const void *val) +{ + return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val, + true); +} + +struct kvm_io_device_ops vgic_io_ops = { + .read = vgic_handle_mmio_read, + .write = vgic_handle_mmio_write, +}; + +/** + * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus + * @kvm: The VM structure pointer + * @base: The (guest) base address for the register frame + * @len: Length of the register frame window + * @ranges: Describing the handler functions for each register + * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call + * @iodev: Points to memory to be passed on to the handler + * + * @iodev stores the parameters of this function so that they are usable by + * the handler and the dispatcher function (since the KVM I/O bus framework + * lacks an opaque parameter). Initialization is done in this function, but + * the reference must remain valid and unique for the whole VGIC lifetime. + * If the register frame is not mapped for a specific VCPU, pass -1 to + * @redist_vcpu_id.
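+ * + * Illustrative call, as used for the redistributor frames above: + * + *	ret = vgic_register_kvm_io_dev(kvm, rdbase, SZ_128K, + *	                               vgic_redist_ranges, i, &iodevs[i]);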
+ */ +int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len, + const struct vgic_io_range *ranges, + int redist_vcpu_id, + struct vgic_io_device *iodev) +{ + struct kvm_vcpu *vcpu = NULL; + int ret; + + if (redist_vcpu_id >= 0) + vcpu = kvm_get_vcpu(kvm, redist_vcpu_id); + + iodev->addr = base; + iodev->len = len; + iodev->reg_ranges = ranges; + iodev->redist_vcpu = vcpu; + + kvm_iodevice_init(&iodev->dev, &vgic_io_ops); + + mutex_lock(&kvm->slots_lock); + + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len, + &iodev->dev); + mutex_unlock(&kvm->slots_lock); + + /* Mark the iodev as invalid if registration fails. */ + if (ret) + iodev->dev.ops = NULL; + + return ret; +} + +static int vgic_nr_shared_irqs(struct vgic_dist *dist) +{ + return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS; +} + +static int compute_active_for_cpu(struct kvm_vcpu *vcpu) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + unsigned long *active, *enabled, *act_percpu, *act_shared; + unsigned long active_private, active_shared; + int nr_shared = vgic_nr_shared_irqs(dist); + int vcpu_id; + + vcpu_id = vcpu->vcpu_id; + act_percpu = vcpu->arch.vgic_cpu.active_percpu; + act_shared = vcpu->arch.vgic_cpu.active_shared; + + active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id); + enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id); + bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS); + + active = vgic_bitmap_get_shared_map(&dist->irq_active); + enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled); + bitmap_and(act_shared, active, enabled, nr_shared); + bitmap_and(act_shared, act_shared, + vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]), + nr_shared); + + active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS); + active_shared = find_first_bit(act_shared, nr_shared); + + return (active_private < VGIC_NR_PRIVATE_IRQS || + active_shared < nr_shared); +} + +static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + unsigned long *pending, *enabled, *pend_percpu, *pend_shared; + unsigned long pending_private, pending_shared; + int nr_shared = vgic_nr_shared_irqs(dist); + int vcpu_id; + + vcpu_id = vcpu->vcpu_id; + pend_percpu = vcpu->arch.vgic_cpu.pending_percpu; + pend_shared = vcpu->arch.vgic_cpu.pending_shared; + + pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id); + enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id); + bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS); + + pending = vgic_bitmap_get_shared_map(&dist->irq_pending); + enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled); + bitmap_and(pend_shared, pending, enabled, nr_shared); + bitmap_and(pend_shared, pend_shared, + vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]), + nr_shared); + + pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS); + pending_shared = find_first_bit(pend_shared, nr_shared); + return (pending_private < VGIC_NR_PRIVATE_IRQS || + pending_shared < vgic_nr_shared_irqs(dist)); +} + +/* + * Update the interrupt state and determine which CPUs have pending + * or active interrupts. Must be called with distributor lock held. 
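+ * + * A typical caller therefore follows the pattern (sketch, cf. + * vgic_v3_dispatch_sgi): + * + *	spin_lock(&dist->lock); + *	vgic_dist_irq_set_pending(vcpu, irq); + *	vgic_update_state(kvm); + *	spin_unlock(&dist->lock);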
+ */ +void vgic_update_state(struct kvm *kvm) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + struct kvm_vcpu *vcpu; + int c; + + if (!dist->enabled) { + set_bit(0, dist->irq_pending_on_cpu); + return; + } + + kvm_for_each_vcpu(c, vcpu, kvm) { + if (compute_pending_for_cpu(vcpu)) + set_bit(c, dist->irq_pending_on_cpu); + + if (compute_active_for_cpu(vcpu)) + set_bit(c, dist->irq_active_on_cpu); + else + clear_bit(c, dist->irq_active_on_cpu); + } +} + +static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr) +{ + return vgic_ops->get_lr(vcpu, lr); +} + +static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, + struct vgic_lr vlr) +{ + vgic_ops->set_lr(vcpu, lr, vlr); +} + +static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, + struct vgic_lr vlr) +{ + vgic_ops->sync_lr_elrsr(vcpu, lr, vlr); +} + +static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu) +{ + return vgic_ops->get_elrsr(vcpu); +} + +static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu) +{ + return vgic_ops->get_eisr(vcpu); +} + +static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu) +{ + vgic_ops->clear_eisr(vcpu); +} + +static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) +{ + return vgic_ops->get_interrupt_status(vcpu); +} + +static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu) +{ + vgic_ops->enable_underflow(vcpu); +} + +static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu) +{ + vgic_ops->disable_underflow(vcpu); +} + +void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) +{ + vgic_ops->get_vmcr(vcpu, vmcr); +} + +void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) +{ + vgic_ops->set_vmcr(vcpu, vmcr); +} + +static inline void vgic_enable(struct kvm_vcpu *vcpu) +{ + vgic_ops->enable(vcpu); +} + +static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr); + + vlr.state = 0; + vgic_set_lr(vcpu, lr_nr, vlr); + clear_bit(lr_nr, vgic_cpu->lr_used); + vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; + vgic_sync_lr_elrsr(vcpu, lr_nr, vlr); +} + +/* + * An interrupt may have been disabled after being made pending on the + * CPU interface (the classic case is a timer running while we're + * rebooting the guest - the interrupt would kick as soon as the CPU + * interface gets enabled, with deadly consequences). + * + * The solution is to examine already active LRs, and check the + * interrupt is still enabled. If not, just retire it. 
+ */ +static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + int lr; + + for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) { + struct vgic_lr vlr = vgic_get_lr(vcpu, lr); + + if (!vgic_irq_is_enabled(vcpu, vlr.irq)) { + vgic_retire_lr(lr, vlr.irq, vcpu); + if (vgic_irq_is_queued(vcpu, vlr.irq)) + vgic_irq_clear_queued(vcpu, vlr.irq); + } + } +} + +static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq, + int lr_nr, struct vgic_lr vlr) +{ + if (vgic_irq_is_active(vcpu, irq)) { + vlr.state |= LR_STATE_ACTIVE; + kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state); + vgic_irq_clear_active(vcpu, irq); + vgic_update_state(vcpu->kvm); + } else if (vgic_dist_irq_is_pending(vcpu, irq)) { + vlr.state |= LR_STATE_PENDING; + kvm_debug("Set pending: 0x%x\n", vlr.state); + } + + if (!vgic_irq_is_edge(vcpu, irq)) + vlr.state |= LR_EOI_INT; + + vgic_set_lr(vcpu, lr_nr, vlr); + vgic_sync_lr_elrsr(vcpu, lr_nr, vlr); +} + +/* + * Queue an interrupt to a CPU virtual interface. Return true on success, + * or false if it wasn't possible to queue it. + * sgi_source must be zero for any non-SGI interrupts. + */ +bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + struct vgic_lr vlr; + int lr; + + /* Sanitize the input... */ + BUG_ON(sgi_source_id & ~7); + BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS); + BUG_ON(irq >= dist->nr_irqs); + + kvm_debug("Queue IRQ%d\n", irq); + + lr = vgic_cpu->vgic_irq_lr_map[irq]; + + /* Do we have an active interrupt for the same CPUID? */ + if (lr != LR_EMPTY) { + vlr = vgic_get_lr(vcpu, lr); + if (vlr.source == sgi_source_id) { + kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq); + BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); + vgic_queue_irq_to_lr(vcpu, irq, lr, vlr); + return true; + } + } + + /* Try to use another LR for this interrupt */ + lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used, + vgic->nr_lr); + if (lr >= vgic->nr_lr) + return false; + + kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id); + vgic_cpu->vgic_irq_lr_map[irq] = lr; + set_bit(lr, vgic_cpu->lr_used); + + vlr.irq = irq; + vlr.source = sgi_source_id; + vlr.state = 0; + vgic_queue_irq_to_lr(vcpu, irq, lr, vlr); + + return true; +} + +static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq) +{ + if (!vgic_can_sample_irq(vcpu, irq)) + return true; /* level interrupt, already queued */ + + if (vgic_queue_irq(vcpu, 0, irq)) { + if (vgic_irq_is_edge(vcpu, irq)) { + vgic_dist_irq_clear_pending(vcpu, irq); + vgic_cpu_irq_clear(vcpu, irq); + } else { + vgic_irq_set_queued(vcpu, irq); + } + + return true; + } + + return false; +} + +/* + * Fill the list registers with pending interrupts before running the + * guest. 
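+ * + * SGIs are queued first (their per-source state needs special handling), + * then PPIs, then SPIs; if we run out of LRs, the underflow maintenance + * interrupt is enabled so that we get another chance to queue the + * remainder once LRs drain.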
+ */ +static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + unsigned long *pa_percpu, *pa_shared; + int i, vcpu_id; + int overflow = 0; + int nr_shared = vgic_nr_shared_irqs(dist); + + vcpu_id = vcpu->vcpu_id; + + pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu; + pa_shared = vcpu->arch.vgic_cpu.pend_act_shared; + + bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu, + VGIC_NR_PRIVATE_IRQS); + bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared, + nr_shared); + /* + * We may not have any pending interrupt, or the interrupts + * may have been serviced from another vcpu. In all cases, + * move along. + */ + if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu)) + goto epilog; + + /* SGIs */ + for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) { + if (!queue_sgi(vcpu, i)) + overflow = 1; + } + + /* PPIs */ + for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) { + if (!vgic_queue_hwirq(vcpu, i)) + overflow = 1; + } + + /* SPIs */ + for_each_set_bit(i, pa_shared, nr_shared) { + if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS)) + overflow = 1; + } + +epilog: + if (overflow) { + vgic_enable_underflow(vcpu); + } else { + vgic_disable_underflow(vcpu); + /* + * We're about to run this VCPU, and we've consumed + * everything the distributor had in store for + * us. Claim we don't have anything pending. We'll + * adjust that if needed while exiting. + */ + clear_bit(vcpu_id, dist->irq_pending_on_cpu); + } +} + +static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) +{ + u32 status = vgic_get_interrupt_status(vcpu); + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + bool level_pending = false; + struct kvm *kvm = vcpu->kvm; + + kvm_debug("STATUS = %08x\n", status); + + if (status & INT_STATUS_EOI) { + /* + * Some level interrupts have been EOIed. Clear their + * active bit. + */ + u64 eisr = vgic_get_eisr(vcpu); + unsigned long *eisr_ptr = u64_to_bitmask(&eisr); + int lr; + + for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) { + struct vgic_lr vlr = vgic_get_lr(vcpu, lr); + WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq)); + + spin_lock(&dist->lock); + vgic_irq_clear_queued(vcpu, vlr.irq); + WARN_ON(vlr.state & LR_STATE_MASK); + vlr.state = 0; + vgic_set_lr(vcpu, lr, vlr); + + /* + * If the IRQ was EOIed it was also ACKed and we + * therefore assume we can clear the soft pending + * state (should it have been set) for this interrupt. + * + * Note: if the IRQ soft pending state was set after + * the IRQ was acked, it actually shouldn't be + * cleared, but we have no way of knowing that unless + * we start trapping ACKs when the soft-pending state + * is set. + */ + vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq); + + /* + * kvm_notify_acked_irq calls kvm_set_irq() + * to reset the IRQ level. Need to release the + * lock for kvm_set_irq to grab it. + */ + spin_unlock(&dist->lock); + + kvm_notify_acked_irq(kvm, 0, + vlr.irq - VGIC_NR_PRIVATE_IRQS); + spin_lock(&dist->lock); + + /* Any additional pending interrupt? */ + if (vgic_dist_irq_get_level(vcpu, vlr.irq)) { + vgic_cpu_irq_set(vcpu, vlr.irq); + level_pending = true; + } else { + vgic_dist_irq_clear_pending(vcpu, vlr.irq); + vgic_cpu_irq_clear(vcpu, vlr.irq); + } + + spin_unlock(&dist->lock); + + /* + * Despite being EOIed, the LR may not have + * been marked as empty.
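+ * Sync the ELRSR bitmap by hand so that the LR can be + * recycled.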
+ */ + vgic_sync_lr_elrsr(vcpu, lr, vlr); + } + } + + if (status & INT_STATUS_UNDERFLOW) + vgic_disable_underflow(vcpu); + + /* + * In the next iterations of the vcpu loop, if we sync the vgic state + * after flushing it, but before entering the guest (this happens for + * pending signals and vmid rollovers), then make sure we don't pick + * up any old maintenance interrupts here. + */ + vgic_clear_eisr(vcpu); + + return level_pending; +} + +/* Sync back the VGIC state after a guest run */ +static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + u64 elrsr; + unsigned long *elrsr_ptr; + int lr, pending; + bool level_pending; + + level_pending = vgic_process_maintenance(vcpu); + elrsr = vgic_get_elrsr(vcpu); + elrsr_ptr = u64_to_bitmask(&elrsr); + + /* Clear mappings for empty LRs */ + for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) { + struct vgic_lr vlr; + + if (!test_and_clear_bit(lr, vgic_cpu->lr_used)) + continue; + + vlr = vgic_get_lr(vcpu, lr); + + BUG_ON(vlr.irq >= dist->nr_irqs); + vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY; + } + + /* Check if we still have something up our sleeve... */ + pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr); + if (level_pending || pending < vgic->nr_lr) + set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); +} + +void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + if (!irqchip_in_kernel(vcpu->kvm)) + return; + + spin_lock(&dist->lock); + __kvm_vgic_flush_hwstate(vcpu); + spin_unlock(&dist->lock); +} + +void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) +{ + if (!irqchip_in_kernel(vcpu->kvm)) + return; + + __kvm_vgic_sync_hwstate(vcpu); +} + +int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + if (!irqchip_in_kernel(vcpu->kvm)) + return 0; + + return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); +} + +int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + if (!irqchip_in_kernel(vcpu->kvm)) + return 0; + + return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); +} + + +void vgic_kick_vcpus(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + int c; + + /* + * We've injected an interrupt, time to find out who deserves + * a good kick... 
+ */ + kvm_for_each_vcpu(c, vcpu, kvm) { + if (kvm_vgic_vcpu_pending_irq(vcpu)) + kvm_vcpu_kick(vcpu); + } +} + +static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level) +{ + int edge_triggered = vgic_irq_is_edge(vcpu, irq); + + /* + * Only inject an interrupt if: + * - edge triggered and we have a rising edge + * - level triggered and we change level + */ + if (edge_triggered) { + int state = vgic_dist_irq_is_pending(vcpu, irq); + return level > state; + } else { + int state = vgic_dist_irq_get_level(vcpu, irq); + return level != state; + } +} + +static int vgic_update_irq_pending(struct kvm *kvm, int cpuid, + unsigned int irq_num, bool level) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + struct kvm_vcpu *vcpu; + int edge_triggered, level_triggered; + int enabled; + bool ret = true, can_inject = true; + + spin_lock(&dist->lock); + + vcpu = kvm_get_vcpu(kvm, cpuid); + edge_triggered = vgic_irq_is_edge(vcpu, irq_num); + level_triggered = !edge_triggered; + + if (!vgic_validate_injection(vcpu, irq_num, level)) { + ret = false; + goto out; + } + + if (irq_num >= VGIC_NR_PRIVATE_IRQS) { + cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS]; + if (cpuid == VCPU_NOT_ALLOCATED) { + /* Pretend we use CPU0, and prevent injection */ + cpuid = 0; + can_inject = false; + } + vcpu = kvm_get_vcpu(kvm, cpuid); + } + + kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid); + + if (level) { + if (level_triggered) + vgic_dist_irq_set_level(vcpu, irq_num); + vgic_dist_irq_set_pending(vcpu, irq_num); + } else { + if (level_triggered) { + vgic_dist_irq_clear_level(vcpu, irq_num); + if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) + vgic_dist_irq_clear_pending(vcpu, irq_num); + } + + ret = false; + goto out; + } + + enabled = vgic_irq_is_enabled(vcpu, irq_num); + + if (!enabled || !can_inject) { + ret = false; + goto out; + } + + if (!vgic_can_sample_irq(vcpu, irq_num)) { + /* + * Level interrupt in progress, will be picked up + * when EOId. + */ + ret = false; + goto out; + } + + if (level) { + vgic_cpu_irq_set(vcpu, irq_num); + set_bit(cpuid, dist->irq_pending_on_cpu); + } + +out: + spin_unlock(&dist->lock); + + return ret ? cpuid : -EINVAL; +} + +/** + * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic + * @kvm: The VM structure pointer + * @cpuid: The CPU for PPIs + * @irq_num: The IRQ number that is assigned to the device + * @level: Edge-triggered: true: to trigger the interrupt + * false: to ignore the call + * Level-sensitive true: activates an interrupt + * false: deactivates an interrupt + * + * The GIC is not concerned with devices being active-LOW or active-HIGH for + * level-sensitive interrupts. You can think of the level parameter as 1 + * being HIGH and 0 being LOW and all devices being active-HIGH. + */ +int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, + bool level) +{ + int ret = 0; + int vcpu_id; + + if (unlikely(!vgic_initialized(kvm))) { + /* + * We only provide the automatic initialization of the VGIC + * for the legacy case of a GICv2. Any other type must + * be explicitly initialized once setup with the respective + * KVM device call. 
+ */ + if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) { + ret = -EBUSY; + goto out; + } + mutex_lock(&kvm->lock); + ret = vgic_init(kvm); + mutex_unlock(&kvm->lock); + + if (ret) + goto out; + } + + if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020)) + return -EINVAL; + + vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level); + if (vcpu_id >= 0) { + /* kick the specified vcpu */ + kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id)); + } + +out: + return ret; +} + +static irqreturn_t vgic_maintenance_handler(int irq, void *data) +{ + /* + * We cannot rely on the vgic maintenance interrupt to be + * delivered synchronously. This means we can only use it to + * exit the VM, and we perform the handling of EOIed + * interrupts on the exit path (see vgic_process_maintenance). + */ + return IRQ_HANDLED; +} + +void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + + kfree(vgic_cpu->pending_shared); + kfree(vgic_cpu->active_shared); + kfree(vgic_cpu->pend_act_shared); + kfree(vgic_cpu->vgic_irq_lr_map); + vgic_cpu->pending_shared = NULL; + vgic_cpu->active_shared = NULL; + vgic_cpu->pend_act_shared = NULL; + vgic_cpu->vgic_irq_lr_map = NULL; +} + +static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + + int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8; + vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL); + vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL); + vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL); + vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL); + + if (!vgic_cpu->pending_shared + || !vgic_cpu->active_shared + || !vgic_cpu->pend_act_shared + || !vgic_cpu->vgic_irq_lr_map) { + kvm_vgic_vcpu_destroy(vcpu); + return -ENOMEM; + } + + memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs); + + /* + * Store the number of LRs per vcpu, so we don't have to go + * all the way to the distributor structure to find out. Only + * assembly code should use this one. + */ + vgic_cpu->nr_lr = vgic->nr_lr; + + return 0; +} + +/** + * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW + * + * The host's GIC naturally limits the maximum amount of VCPUs a guest + * can use. + */ +int kvm_vgic_get_max_vcpus(void) +{ + return vgic->max_gic_vcpus; +} + +void kvm_vgic_destroy(struct kvm *kvm) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + struct kvm_vcpu *vcpu; + int i; + + kvm_for_each_vcpu(i, vcpu, kvm) + kvm_vgic_vcpu_destroy(vcpu); + + vgic_free_bitmap(&dist->irq_enabled); + vgic_free_bitmap(&dist->irq_level); + vgic_free_bitmap(&dist->irq_pending); + vgic_free_bitmap(&dist->irq_soft_pend); + vgic_free_bitmap(&dist->irq_queued); + vgic_free_bitmap(&dist->irq_cfg); + vgic_free_bytemap(&dist->irq_priority); + if (dist->irq_spi_target) { + for (i = 0; i < dist->nr_cpus; i++) + vgic_free_bitmap(&dist->irq_spi_target[i]); + } + kfree(dist->irq_sgi_sources); + kfree(dist->irq_spi_cpu); + kfree(dist->irq_spi_mpidr); + kfree(dist->irq_spi_target); + kfree(dist->irq_pending_on_cpu); + kfree(dist->irq_active_on_cpu); + dist->irq_sgi_sources = NULL; + dist->irq_spi_cpu = NULL; + dist->irq_spi_target = NULL; + dist->irq_pending_on_cpu = NULL; + dist->irq_active_on_cpu = NULL; + dist->nr_cpus = 0; +} + +/* + * Allocate and initialize the various data structures. Must be called + * with kvm->lock held! 
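+ * + * Callers follow the pattern already used in kvm_vgic_inject_irq() + * above (sketch): + * + *	mutex_lock(&kvm->lock); + *	ret = vgic_init(kvm); + *	mutex_unlock(&kvm->lock);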
+ */ +int vgic_init(struct kvm *kvm) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + struct kvm_vcpu *vcpu; + int nr_cpus, nr_irqs; + int ret, i, vcpu_id; + + if (vgic_initialized(kvm)) + return 0; + + nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus); + if (!nr_cpus) /* No vcpus? Can't be good... */ + return -ENODEV; + + /* + * If nobody configured the number of interrupts, use the + * legacy one. + */ + if (!dist->nr_irqs) + dist->nr_irqs = VGIC_NR_IRQS_LEGACY; + + nr_irqs = dist->nr_irqs; + + ret = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs); + ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs); + ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs); + ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs); + ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs); + ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs); + ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs); + ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs); + + if (ret) + goto out; + + dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL); + dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL); + dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus, + GFP_KERNEL); + dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long), + GFP_KERNEL); + dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long), + GFP_KERNEL); + if (!dist->irq_sgi_sources || + !dist->irq_spi_cpu || + !dist->irq_spi_target || + !dist->irq_pending_on_cpu || + !dist->irq_active_on_cpu) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < nr_cpus; i++) + ret |= vgic_init_bitmap(&dist->irq_spi_target[i], + nr_cpus, nr_irqs); + + if (ret) + goto out; + + ret = kvm->arch.vgic.vm_ops.init_model(kvm); + if (ret) + goto out; + + kvm_for_each_vcpu(vcpu_id, vcpu, kvm) { + ret = vgic_vcpu_init_maps(vcpu, nr_irqs); + if (ret) { + kvm_err("VGIC: Failed to allocate vcpu memory\n"); + break; + } + + for (i = 0; i < dist->nr_irqs; i++) { + if (i < VGIC_NR_PPIS) + vgic_bitmap_set_irq_val(&dist->irq_enabled, + vcpu->vcpu_id, i, 1); + if (i < VGIC_NR_PRIVATE_IRQS) + vgic_bitmap_set_irq_val(&dist->irq_cfg, + vcpu->vcpu_id, i, + VGIC_CFG_EDGE); + } + + vgic_enable(vcpu); + } + +out: + if (ret) + kvm_vgic_destroy(kvm); + + return ret; +} + +static int init_vgic_model(struct kvm *kvm, int type) +{ + switch (type) { + case KVM_DEV_TYPE_ARM_VGIC_V2: + vgic_v2_init_emulation(kvm); + break; +#ifdef CONFIG_ARM_GIC_V3 + case KVM_DEV_TYPE_ARM_VGIC_V3: + vgic_v3_init_emulation(kvm); + break; +#endif + default: + return -ENODEV; + } + + if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus) + return -E2BIG; + + return 0; +} + +int kvm_vgic_create(struct kvm *kvm, u32 type) +{ + int i, vcpu_lock_idx = -1, ret; + struct kvm_vcpu *vcpu; + + mutex_lock(&kvm->lock); + + if (irqchip_in_kernel(kvm)) { + ret = -EEXIST; + goto out; + } + + /* + * This function is also called by the KVM_CREATE_IRQCHIP handler, + * which had no chance yet to check the availability of the GICv2 + * emulation. So check this here again. KVM_CREATE_DEVICE does + * the proper checks already. + */ + if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) { + ret = -ENODEV; + goto out; + } + + /* + * Any time a vcpu is run, vcpu_load is called which tries to grab the + * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure + * that no other VCPUs are run while we create the vgic. 
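+ *
+ * (mutex_trylock() rather than mutex_lock() is used below so that a
+ * vcpu currently sitting in KVM_RUN, holding its own mutex, makes
+ * vgic creation fail with -EBUSY instead of blocking indefinitely.)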
+ */
+ ret = -EBUSY;
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!mutex_trylock(&vcpu->mutex))
+ goto out_unlock;
+ vcpu_lock_idx = i;
+ }
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu->arch.has_run_once)
+ goto out_unlock;
+ }
+
+ ret = init_vgic_model(kvm, type);
+ if (ret)
+ goto out_unlock;
+
+ spin_lock_init(&kvm->arch.vgic.lock);
+ kvm->arch.vgic.in_kernel = true;
+ kvm->arch.vgic.vgic_model = type;
+ kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
+ kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
+ kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+ kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;
+
+out_unlock:
+ for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+ vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+ mutex_unlock(&vcpu->mutex);
+ }
+
+out:
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static int vgic_ioaddr_overlap(struct kvm *kvm)
+{
+ phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
+ phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
+
+ if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
+ return 0;
+ if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
+ (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
+ return -EBUSY;
+ return 0;
+}
+
+static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
+ phys_addr_t addr, phys_addr_t size)
+{
+ int ret;
+
+ if (addr & ~KVM_PHYS_MASK)
+ return -E2BIG;
+
+ if (addr & (SZ_4K - 1))
+ return -EINVAL;
+
+ if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
+ return -EEXIST;
+ if (addr + size < addr)
+ return -EINVAL;
+
+ *ioaddr = addr;
+ ret = vgic_ioaddr_overlap(kvm);
+ if (ret)
+ *ioaddr = VGIC_ADDR_UNDEF;
+
+ return ret;
+}
+
+/**
+ * kvm_vgic_addr - set or get vgic VM base addresses
+ * @kvm: pointer to the vm struct
+ * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
+ * @addr: pointer to address value
+ * @write: if true set the address in the VM address space, if false read the
+ * address
+ *
+ * Set or get the vgic base addresses for the distributor and the virtual CPU
+ * interface in the VM physical address space. These addresses are properties
+ * of the emulated core/SoC and therefore user space initially knows this
+ * information.
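+ *
+ * Hypothetical example (the address below is illustrative, not
+ * mandated anywhere in this file): a VMM placing the GICv2
+ * distributor at guest physical 0x08000000 ends up, via the
+ * KVM_DEV_ARM_VGIC_GRP_ADDR attribute handled in
+ * vgic_set_common_attr(), with the equivalent of:
+ *
+ *   u64 addr = 0x08000000;
+ *   kvm_vgic_addr(kvm, KVM_VGIC_V2_ADDR_TYPE_DIST, &addr, true);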
+ */ +int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) +{ + int r = 0; + struct vgic_dist *vgic = &kvm->arch.vgic; + int type_needed; + phys_addr_t *addr_ptr, block_size; + phys_addr_t alignment; + + mutex_lock(&kvm->lock); + switch (type) { + case KVM_VGIC_V2_ADDR_TYPE_DIST: + type_needed = KVM_DEV_TYPE_ARM_VGIC_V2; + addr_ptr = &vgic->vgic_dist_base; + block_size = KVM_VGIC_V2_DIST_SIZE; + alignment = SZ_4K; + break; + case KVM_VGIC_V2_ADDR_TYPE_CPU: + type_needed = KVM_DEV_TYPE_ARM_VGIC_V2; + addr_ptr = &vgic->vgic_cpu_base; + block_size = KVM_VGIC_V2_CPU_SIZE; + alignment = SZ_4K; + break; +#ifdef CONFIG_ARM_GIC_V3 + case KVM_VGIC_V3_ADDR_TYPE_DIST: + type_needed = KVM_DEV_TYPE_ARM_VGIC_V3; + addr_ptr = &vgic->vgic_dist_base; + block_size = KVM_VGIC_V3_DIST_SIZE; + alignment = SZ_64K; + break; + case KVM_VGIC_V3_ADDR_TYPE_REDIST: + type_needed = KVM_DEV_TYPE_ARM_VGIC_V3; + addr_ptr = &vgic->vgic_redist_base; + block_size = KVM_VGIC_V3_REDIST_SIZE; + alignment = SZ_64K; + break; +#endif + default: + r = -ENODEV; + goto out; + } + + if (vgic->vgic_model != type_needed) { + r = -ENODEV; + goto out; + } + + if (write) { + if (!IS_ALIGNED(*addr, alignment)) + r = -EINVAL; + else + r = vgic_ioaddr_assign(kvm, addr_ptr, *addr, + block_size); + } else { + *addr = *addr_ptr; + } + +out: + mutex_unlock(&kvm->lock); + return r; +} + +int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) +{ + int r; + + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_ADDR: { + u64 __user *uaddr = (u64 __user *)(long)attr->addr; + u64 addr; + unsigned long type = (unsigned long)attr->attr; + + if (copy_from_user(&addr, uaddr, sizeof(addr))) + return -EFAULT; + + r = kvm_vgic_addr(dev->kvm, type, &addr, true); + return (r == -ENODEV) ? -ENXIO : r; + } + case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: { + u32 __user *uaddr = (u32 __user *)(long)attr->addr; + u32 val; + int ret = 0; + + if (get_user(val, uaddr)) + return -EFAULT; + + /* + * We require: + * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs + * - at most 1024 interrupts + * - a multiple of 32 interrupts + */ + if (val < (VGIC_NR_PRIVATE_IRQS + 32) || + val > VGIC_MAX_IRQS || + (val & 31)) + return -EINVAL; + + mutex_lock(&dev->kvm->lock); + + if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs) + ret = -EBUSY; + else + dev->kvm->arch.vgic.nr_irqs = val; + + mutex_unlock(&dev->kvm->lock); + + return ret; + } + case KVM_DEV_ARM_VGIC_GRP_CTRL: { + switch (attr->attr) { + case KVM_DEV_ARM_VGIC_CTRL_INIT: + r = vgic_init(dev->kvm); + return r; + } + break; + } + } + + return -ENXIO; +} + +int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) +{ + int r = -ENXIO; + + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_ADDR: { + u64 __user *uaddr = (u64 __user *)(long)attr->addr; + u64 addr; + unsigned long type = (unsigned long)attr->attr; + + r = kvm_vgic_addr(dev->kvm, type, &addr, false); + if (r) + return (r == -ENODEV) ? 
-ENXIO : r;
+
+ if (copy_to_user(uaddr, &addr, sizeof(addr)))
+ return -EFAULT;
+ break;
+ }
+ case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+
+ r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
+ break;
+ }
+ }
+
+ return r;
+}
+
+int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
+{
+ if (vgic_find_range(ranges, 4, offset))
+ return 0;
+ else
+ return -ENXIO;
+}
+
+static void vgic_init_maintenance_interrupt(void *info)
+{
+ enable_percpu_irq(vgic->maint_irq, 0);
+}
+
+static int vgic_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *cpu)
+{
+ switch (action) {
+ case CPU_STARTING:
+ case CPU_STARTING_FROZEN:
+ vgic_init_maintenance_interrupt(NULL);
+ break;
+ case CPU_DYING:
+ case CPU_DYING_FROZEN:
+ disable_percpu_irq(vgic->maint_irq);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block vgic_cpu_nb = {
+ .notifier_call = vgic_cpu_notify,
+};
+
+static const struct of_device_id vgic_ids[] = {
+ { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
+ { .compatible = "arm,cortex-a7-gic", .data = vgic_v2_probe, },
+ { .compatible = "arm,gic-400", .data = vgic_v2_probe, },
+ { .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
+ {},
+};
+
+int kvm_vgic_hyp_init(void)
+{
+ const struct of_device_id *matched_id;
+ int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
+ const struct vgic_params **);
+ struct device_node *vgic_node;
+ int ret;
+
+ vgic_node = of_find_matching_node_and_match(NULL,
+ vgic_ids, &matched_id);
+ if (!vgic_node) {
+ kvm_err("error: no compatible GIC node found\n");
+ return -ENODEV;
+ }
+
+ vgic_probe = matched_id->data;
+ ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
+ if (ret)
+ return ret;
+
+ ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
+ "vgic", kvm_get_running_vcpus());
+ if (ret) {
+ kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
+ return ret;
+ }
+
+ ret = __register_cpu_notifier(&vgic_cpu_nb);
+ if (ret) {
+ kvm_err("Cannot register vgic CPU notifier\n");
+ goto out_free_irq;
+ }
+
+ /* Callback into arch code for setup */
+ vgic_arch_setup(vgic);
+
+ on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
+ return 0;
+
+out_free_irq:
+ free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
+ return ret;
+}
+
+int kvm_irq_map_gsi(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *entries,
+ int gsi)
+{
+ return 0;
+}
+
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+ return pin;
+}
+
+int kvm_set_irq(struct kvm *kvm, int irq_source_id,
+ u32 irq, int level, bool line_status)
+{
+ unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;
+
+ trace_kvm_set_irq(irq, level, irq_source_id);
+
+ BUG_ON(!vgic_initialized(kvm));
+
+ return kvm_vgic_inject_irq(kvm, 0, spi, level);
+}
+
+/* MSI not implemented yet */
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id,
+ int level, bool line_status)
+{
+ return 0;
+}
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
new file mode 100644
index 000000000..0df74cbb6
--- /dev/null
+++ b/virt/kvm/arm/vgic.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2012-2014 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from virt/kvm/arm/vgic.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __KVM_VGIC_H__ +#define __KVM_VGIC_H__ + +#include <kvm/iodev.h> + +#define VGIC_ADDR_UNDEF (-1) +#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) + +#define PRODUCT_ID_KVM 0x4b /* ASCII code K */ +#define IMPLEMENTER_ARM 0x43b + +#define ACCESS_READ_VALUE (1 << 0) +#define ACCESS_READ_RAZ (0 << 0) +#define ACCESS_READ_MASK(x) ((x) & (1 << 0)) +#define ACCESS_WRITE_IGNORED (0 << 1) +#define ACCESS_WRITE_SETBIT (1 << 1) +#define ACCESS_WRITE_CLEARBIT (2 << 1) +#define ACCESS_WRITE_VALUE (3 << 1) +#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1)) + +#define VCPU_NOT_ALLOCATED ((u8)-1) + +unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x); + +void vgic_update_state(struct kvm *kvm); +int vgic_init_common_maps(struct kvm *kvm); + +u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset); +u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset); + +void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq); +void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq); +void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq); +void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, + int irq, int val); + +void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); +void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); + +bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq); +void vgic_unqueue_irqs(struct kvm_vcpu *vcpu); + +struct kvm_exit_mmio { + phys_addr_t phys_addr; + void *data; + u32 len; + bool is_write; + void *private; +}; + +void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, + phys_addr_t offset, int mode); +bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, + phys_addr_t offset); + +static inline +u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask) +{ + return le32_to_cpu(*((u32 *)mmio->data)) & mask; +} + +static inline +void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value) +{ + *((u32 *)mmio->data) = cpu_to_le32(value) & mask; +} + +struct vgic_io_range { + phys_addr_t base; + unsigned long len; + int bits_per_irq; + bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, + phys_addr_t offset); +}; + +int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len, + const struct vgic_io_range *ranges, + int redist_id, + struct vgic_io_device *iodev); + +static inline bool is_in_range(phys_addr_t addr, unsigned long len, + phys_addr_t baseaddr, unsigned long size) +{ + return (addr >= baseaddr) && (addr + len <= baseaddr + size); +} + +const +struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges, + int len, gpa_t offset); + +bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, + phys_addr_t offset, int vcpu_id, int access); + +bool vgic_handle_set_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, + phys_addr_t offset, int vcpu_id); + +bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, + phys_addr_t offset, int vcpu_id); + +bool vgic_handle_set_active_reg(struct kvm *kvm, + struct kvm_exit_mmio *mmio, + phys_addr_t offset, int 
vcpu_id);
+
+bool vgic_handle_clear_active_reg(struct kvm *kvm,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id);
+
+bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+ phys_addr_t offset);
+
+void vgic_kick_vcpus(struct kvm *kvm);
+
+int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset);
+int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
+int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
+
+int vgic_init(struct kvm *kvm);
+void vgic_v2_init_emulation(struct kvm *kvm);
+void vgic_v3_init_emulation(struct kvm *kvm);
+
+#endif
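
As a closing illustration, here is a minimal user-space sketch of how the control paths above are exercised through the KVM device API. Everything VMM-side is assumed for the example: vm_fd (an existing KVM VM file descriptor), the helper names set_vgic_attr() and configure_vgic_v2(), the base addresses and the IRQ count are invented for illustration; the ioctls and the KVM_DEV_ARM_VGIC_* constants are the standard UAPI served by kvm_vgic_create(), kvm_vgic_addr() and vgic_set_common_attr() in the code above.

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Illustrative guest-physical placement; any 4K-aligned, non-overlapping
 * pair inside the guest memory map would do. */
#define EXAMPLE_DIST_BASE 0x08000000ULL
#define EXAMPLE_CPU_BASE  0x08010000ULL

static int set_vgic_attr(int dev_fd, uint32_t group, uint64_t attr, void *data)
{
        struct kvm_device_attr a = {
                .group = group,
                .attr  = attr,
                .addr  = (uint64_t)(unsigned long)data,
        };
        return ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &a);
}

static int configure_vgic_v2(int vm_fd)
{
        struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_V2 };
        uint64_t dist = EXAMPLE_DIST_BASE, cpu = EXAMPLE_CPU_BASE;
        uint32_t nr_irqs = 128; /* multiple of 32, within [64, 1024] */

        /* KVM_CREATE_DEVICE lands in kvm_vgic_create() above. */
        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
                return -1;

        /* Both base addresses go through kvm_vgic_addr(..., write=true). */
        if (set_vgic_attr(cd.fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
                          KVM_VGIC_V2_ADDR_TYPE_DIST, &dist) ||
            set_vgic_attr(cd.fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
                          KVM_VGIC_V2_ADDR_TYPE_CPU, &cpu))
                return -1;

        /* Checked against the constraints in vgic_set_common_attr(). */
        if (set_vgic_attr(cd.fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, &nr_irqs))
                return -1;

        /* Explicit init; reaches vgic_init() via KVM_DEV_ARM_VGIC_CTRL_INIT. */
        return set_vgic_attr(cd.fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                             KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
}

The explicit KVM_DEV_ARM_VGIC_CTRL_INIT step at the end is what makes the GICv2 auto-initialization fallback in kvm_vgic_inject_irq() unnecessary for a VMM that follows this sequence.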