author    | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-12-15 14:52:16 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-12-15 14:52:16 -0300
commit    | 8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be (patch)
tree      | e9891aa6c295060d065adffd610c4f49ecf884f3 /drivers/xen
parent    | a71852147516bc1cb5b0b3cbd13639bfd4022dc8 (diff)
Linux-libre 4.3.2-gnu
Diffstat (limited to 'drivers/xen')
-rw-r--r-- | drivers/xen/Kconfig | 11
-rw-r--r-- | drivers/xen/balloon.c | 8
-rw-r--r-- | drivers/xen/biomerge.c | 6
-rw-r--r-- | drivers/xen/events/events_base.c | 17
-rw-r--r-- | drivers/xen/events/events_fifo.c | 4
-rw-r--r-- | drivers/xen/gntalloc.c | 5
-rw-r--r-- | drivers/xen/gntdev.c | 2
-rw-r--r-- | drivers/xen/manage.c | 2
-rw-r--r-- | drivers/xen/privcmd.c | 48
-rw-r--r-- | drivers/xen/swiotlb-xen.c | 22
-rw-r--r-- | drivers/xen/sys-hypervisor.c | 136
-rw-r--r-- | drivers/xen/tmem.c | 24
-rw-r--r-- | drivers/xen/xen-acpi-processor.c | 16
-rw-r--r-- | drivers/xen/xenbus/xenbus_client.c | 2
-rw-r--r-- | drivers/xen/xenbus/xenbus_dev_backend.c | 2
-rw-r--r-- | drivers/xen/xenbus/xenbus_probe.c | 16
-rw-r--r-- | drivers/xen/xenfs/Makefile | 1
-rw-r--r-- | drivers/xen/xenfs/super.c | 3
-rw-r--r-- | drivers/xen/xenfs/xenfs.h | 1
-rw-r--r-- | drivers/xen/xenfs/xensyms.c | 152
-rw-r--r-- | drivers/xen/xlate_mmu.c | 18
21 files changed, 388 insertions(+), 108 deletions(-)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 7cd226da1..73708acce 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -280,4 +280,15 @@ config XEN_ACPI def_bool y depends on X86 && ACPI +config XEN_SYMS + bool "Xen symbols" + depends on X86 && XEN_DOM0 && XENFS + default y if KALLSYMS + help + Exports hypervisor symbols (along with their types and addresses) via + /proc/xen/xensyms file, similar to /proc/kallsyms + +config XEN_HAVE_VPMU + bool + endmenu diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index bf4a23c7c..c79329fcf 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -441,7 +441,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) /* Update direct mapping, invalidate P2M, and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = frame_list[i]; - frame_list[i] = pfn_to_mfn(pfn); + frame_list[i] = pfn_to_gfn(pfn); page = pfn_to_page(pfn); #ifdef CONFIG_XEN_HAVE_PVMMU @@ -638,9 +638,9 @@ static int __init balloon_init(void) * regions (see arch/x86/xen/setup.c). */ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) - if (xen_extra_mem[i].size) - balloon_add_region(PFN_UP(xen_extra_mem[i].start), - PFN_DOWN(xen_extra_mem[i].size)); + if (xen_extra_mem[i].n_pfns) + balloon_add_region(xen_extra_mem[i].start_pfn, + xen_extra_mem[i].n_pfns); return 0; } diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c index 0edb91c0d..8ae2fc90e 100644 --- a/drivers/xen/biomerge.c +++ b/drivers/xen/biomerge.c @@ -6,10 +6,10 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, const struct bio_vec *vec2) { - unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page)); - unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page)); + unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page)); + unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page)); return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) && - ((mfn1 == mfn2) || ((mfn1+1) == mfn2)); + ((bfn1 == bfn2) || ((bfn1+1) == bfn2)); } EXPORT_SYMBOL(xen_biovec_phys_mergeable); diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 96093ae36..fb2362399 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -39,6 +39,7 @@ #include <asm/irq.h> #include <asm/idle.h> #include <asm/io_apic.h> +#include <asm/i8259.h> #include <asm/xen/pci.h> #include <xen/page.h> #endif @@ -336,7 +337,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) BUG_ON(irq == -1); #ifdef CONFIG_SMP - cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu)); + cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu)); #endif xen_evtchn_port_bind_to_cpu(info, cpu); @@ -373,7 +374,7 @@ static void xen_irq_init(unsigned irq) struct irq_info *info; #ifdef CONFIG_SMP /* By default all event channels notify CPU#0. */ - cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(0)); + cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0)); #endif info = kzalloc(sizeof(*info), GFP_KERNEL); @@ -420,7 +421,7 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) return xen_allocate_irq_dynamic(); /* Legacy IRQ descriptors are already allocated by the arch. */ - if (gsi < NR_IRQS_LEGACY) + if (gsi < nr_legacy_irqs()) irq = gsi; else irq = irq_alloc_desc_at(gsi, -1); @@ -446,7 +447,7 @@ static void xen_free_irq(unsigned irq) kfree(info); /* Legacy IRQ descriptors are managed by the arch. 
*/ - if (irq < NR_IRQS_LEGACY) + if (irq < nr_legacy_irqs()) return; irq_free_desc(irq); @@ -1301,11 +1302,7 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) if (!VALID_EVTCHN(evtchn)) return -1; - /* - * Events delivered via platform PCI interrupts are always - * routed to vcpu 0 and hence cannot be rebound. - */ - if (xen_hvm_domain() && !xen_have_vector_callback) + if (!xen_support_evtchn_rebind()) return -1; /* Send future instances of this interrupt to other vcpu. */ @@ -1692,7 +1689,7 @@ void __init xen_init_IRQ(void) struct physdev_pirq_eoi_gmfn eoi_gmfn; pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO); - eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map); + eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map); rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn); /* TODO: No PVH support for PIRQ EOI */ if (rc != 0) { diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index ed673e1ac..1d4baf56c 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c @@ -111,7 +111,7 @@ static int init_control_block(int cpu, for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++) q->head[i] = 0; - init_control.control_gfn = virt_to_mfn(control_block); + init_control.control_gfn = virt_to_gfn(control_block); init_control.offset = 0; init_control.vcpu = cpu; @@ -167,7 +167,7 @@ static int evtchn_fifo_setup(struct irq_info *info) /* Mask all events in this page before adding it. */ init_array_page(array_page); - expand_array.array_gfn = virt_to_mfn(array_page); + expand_array.array_gfn = virt_to_gfn(array_page); ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array); if (ret < 0) diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index e53fe1917..4547a91bc 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c @@ -142,7 +142,8 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op, /* Grant foreign access to the page. */ rc = gnttab_grant_foreign_access(op->domid, - pfn_to_mfn(page_to_pfn(gref->page)), readonly); + xen_page_to_gfn(gref->page), + readonly); if (rc < 0) goto undo; gref_ids[i] = gref->gref_id = rc; @@ -493,7 +494,7 @@ static void gntalloc_vma_close(struct vm_area_struct *vma) mutex_unlock(&gref_mutex); } -static struct vm_operations_struct gntalloc_vmops = { +static const struct vm_operations_struct gntalloc_vmops = { .open = gntalloc_vma_open, .close = gntalloc_vma_close, }; diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 0dbb222da..2ea0b3b2a 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -433,7 +433,7 @@ static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma, return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT]; } -static struct vm_operations_struct gntdev_vmops = { +static const struct vm_operations_struct gntdev_vmops = { .open = gntdev_vma_open, .close = gntdev_vma_close, .find_special_page = gntdev_vma_find_special_page, diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index d10effee9..e12bd3635 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -80,7 +80,7 @@ static int xen_suspend(void *data) * is resuming in a new domain. */ si->cancelled = HYPERVISOR_suspend(xen_pv_domain() - ? virt_to_mfn(xen_start_info) + ? 
virt_to_gfn(xen_start_info) : 0); xen_arch_post_suspend(si->cancelled); diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 5a296161d..5e9adac92 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -193,16 +193,16 @@ static int traverse_pages_block(unsigned nelem, size_t size, return ret; } -struct mmap_mfn_state { +struct mmap_gfn_state { unsigned long va; struct vm_area_struct *vma; domid_t domain; }; -static int mmap_mfn_range(void *data, void *state) +static int mmap_gfn_range(void *data, void *state) { struct privcmd_mmap_entry *msg = data; - struct mmap_mfn_state *st = state; + struct mmap_gfn_state *st = state; struct vm_area_struct *vma = st->vma; int rc; @@ -216,7 +216,7 @@ static int mmap_mfn_range(void *data, void *state) ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) return -EINVAL; - rc = xen_remap_domain_mfn_range(vma, + rc = xen_remap_domain_gfn_range(vma, msg->va & PAGE_MASK, msg->mfn, msg->npages, vma->vm_page_prot, @@ -236,7 +236,7 @@ static long privcmd_ioctl_mmap(void __user *udata) struct vm_area_struct *vma; int rc; LIST_HEAD(pagelist); - struct mmap_mfn_state state; + struct mmap_gfn_state state; /* We only support privcmd_ioctl_mmap_batch for auto translated. */ if (xen_feature(XENFEAT_auto_translated_physmap)) @@ -273,7 +273,7 @@ static long privcmd_ioctl_mmap(void __user *udata) rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry), &pagelist, - mmap_mfn_range, &state); + mmap_gfn_range, &state); out_up: @@ -299,18 +299,18 @@ struct mmap_batch_state { int global_error; int version; - /* User-space mfn array to store errors in the second pass for V1. */ - xen_pfn_t __user *user_mfn; + /* User-space gfn array to store errors in the second pass for V1. */ + xen_pfn_t __user *user_gfn; /* User-space int array to store errors in the second pass for V2. */ int __user *user_err; }; -/* auto translated dom0 note: if domU being created is PV, then mfn is - * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP). +/* auto translated dom0 note: if domU being created is PV, then gfn is + * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP). */ static int mmap_batch_fn(void *data, int nr, void *state) { - xen_pfn_t *mfnp = data; + xen_pfn_t *gfnp = data; struct mmap_batch_state *st = state; struct vm_area_struct *vma = st->vma; struct page **pages = vma->vm_private_data; @@ -321,8 +321,8 @@ static int mmap_batch_fn(void *data, int nr, void *state) cur_pages = &pages[st->index]; BUG_ON(nr < 0); - ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr, - (int *)mfnp, st->vma->vm_page_prot, + ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr, + (int *)gfnp, st->vma->vm_page_prot, st->domain, cur_pages); /* Adjust the global_error? */ @@ -347,22 +347,22 @@ static int mmap_return_error(int err, struct mmap_batch_state *st) if (st->version == 1) { if (err) { - xen_pfn_t mfn; + xen_pfn_t gfn; - ret = get_user(mfn, st->user_mfn); + ret = get_user(gfn, st->user_gfn); if (ret < 0) return ret; /* * V1 encodes the error codes in the 32bit top - * nibble of the mfn (with its known + * nibble of the gfn (with its known * limitations vis-a-vis 64 bit callers). */ - mfn |= (err == -ENOENT) ? + gfn |= (err == -ENOENT) ? 
PRIVCMD_MMAPBATCH_PAGED_ERROR : PRIVCMD_MMAPBATCH_MFN_ERROR; - return __put_user(mfn, st->user_mfn++); + return __put_user(gfn, st->user_gfn++); } else - st->user_mfn++; + st->user_gfn++; } else { /* st->version == 2 */ if (err) return __put_user(err, st->user_err++); @@ -388,7 +388,7 @@ static int mmap_return_errors(void *data, int nr, void *state) return 0; } -/* Allocate pfns that are then mapped with gmfns from foreign domid. Update +/* Allocate pfns that are then mapped with gfns from foreign domid. Update * the vma with the page info to use later. * Returns: 0 if success, otherwise -errno */ @@ -414,7 +414,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs) return 0; } -static struct vm_operations_struct privcmd_vm_ops; +static const struct vm_operations_struct privcmd_vm_ops; static long privcmd_ioctl_mmap_batch(void __user *udata, int version) { @@ -526,7 +526,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) if (state.global_error) { /* Write back errors in second pass. */ - state.user_mfn = (xen_pfn_t *)m.arr; + state.user_gfn = (xen_pfn_t *)m.arr; state.user_err = m.err; ret = traverse_pages_block(m.num, sizeof(xen_pfn_t), &pagelist, mmap_return_errors, &state); @@ -587,7 +587,7 @@ static void privcmd_close(struct vm_area_struct *vma) if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) return; - rc = xen_unmap_domain_mfn_range(vma, numpgs, pages); + rc = xen_unmap_domain_gfn_range(vma, numpgs, pages); if (rc == 0) free_xenballooned_pages(numpgs, pages); else @@ -605,7 +605,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } -static struct vm_operations_struct privcmd_vm_ops = { +static const struct vm_operations_struct privcmd_vm_ops = { .close = privcmd_close, .fault = privcmd_fault }; diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 4c549323c..79bc4933b 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -82,8 +82,8 @@ static u64 start_dma_addr; */ static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr) { - unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr)); - dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT; + unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr)); + dma_addr_t dma = (dma_addr_t)bfn << PAGE_SHIFT; dma |= paddr & ~PAGE_MASK; @@ -92,7 +92,7 @@ static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr) static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr) { - unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr)); + unsigned long pfn = bfn_to_pfn(PFN_DOWN(baddr)); dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT; phys_addr_t paddr = dma; @@ -110,15 +110,15 @@ static int check_pages_physically_contiguous(unsigned long pfn, unsigned int offset, size_t length) { - unsigned long next_mfn; + unsigned long next_bfn; int i; int nr_pages; - next_mfn = pfn_to_mfn(pfn); + next_bfn = pfn_to_bfn(pfn); nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT; for (i = 1; i < nr_pages; i++) { - if (pfn_to_mfn(++pfn) != ++next_mfn) + if (pfn_to_bfn(++pfn) != ++next_bfn) return 0; } return 1; @@ -138,8 +138,8 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) static int is_xen_swiotlb_buffer(dma_addr_t dma_addr) { - unsigned long mfn = PFN_DOWN(dma_addr); - unsigned long pfn = mfn_to_local_pfn(mfn); + unsigned long bfn = PFN_DOWN(dma_addr); + unsigned long pfn = bfn_to_local_pfn(bfn); phys_addr_t paddr; /* If the address is outside our domain, it CAN @@ -311,9 +311,6 @@ 
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, */ flags &= ~(__GFP_DMA | __GFP_HIGHMEM); - if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret)) - return ret; - /* On ARM this function returns an ioremap'ped virtual address for * which virt_to_phys doesn't return the corresponding physical * address. In fact on ARM virt_to_phys only works for kernel direct @@ -356,9 +353,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, phys_addr_t phys; u64 dma_mask = DMA_BIT_MASK(32); - if (dma_release_from_coherent(hwdev, order, vaddr)) - return; - if (hwdev && hwdev->coherent_dma_mask) dma_mask = hwdev->coherent_dma_mask; diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c index 96453f8a8..b5a7342e0 100644 --- a/drivers/xen/sys-hypervisor.c +++ b/drivers/xen/sys-hypervisor.c @@ -20,6 +20,9 @@ #include <xen/xenbus.h> #include <xen/interface/xen.h> #include <xen/interface/version.h> +#ifdef CONFIG_XEN_HAVE_VPMU +#include <xen/interface/xenpmu.h> +#endif #define HYPERVISOR_ATTR_RO(_name) \ static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name) @@ -368,6 +371,126 @@ static void xen_properties_destroy(void) sysfs_remove_group(hypervisor_kobj, &xen_properties_group); } +#ifdef CONFIG_XEN_HAVE_VPMU +struct pmu_mode { + const char *name; + uint32_t mode; +}; + +static struct pmu_mode pmu_modes[] = { + {"off", XENPMU_MODE_OFF}, + {"self", XENPMU_MODE_SELF}, + {"hv", XENPMU_MODE_HV}, + {"all", XENPMU_MODE_ALL} +}; + +static ssize_t pmu_mode_store(struct hyp_sysfs_attr *attr, + const char *buffer, size_t len) +{ + int ret; + struct xen_pmu_params xp; + int i; + + for (i = 0; i < ARRAY_SIZE(pmu_modes); i++) { + if (strncmp(buffer, pmu_modes[i].name, len - 1) == 0) { + xp.val = pmu_modes[i].mode; + break; + } + } + + if (i == ARRAY_SIZE(pmu_modes)) + return -EINVAL; + + xp.version.maj = XENPMU_VER_MAJ; + xp.version.min = XENPMU_VER_MIN; + ret = HYPERVISOR_xenpmu_op(XENPMU_mode_set, &xp); + if (ret) + return ret; + + return len; +} + +static ssize_t pmu_mode_show(struct hyp_sysfs_attr *attr, char *buffer) +{ + int ret; + struct xen_pmu_params xp; + int i; + uint32_t mode; + + xp.version.maj = XENPMU_VER_MAJ; + xp.version.min = XENPMU_VER_MIN; + ret = HYPERVISOR_xenpmu_op(XENPMU_mode_get, &xp); + if (ret) + return ret; + + mode = (uint32_t)xp.val; + for (i = 0; i < ARRAY_SIZE(pmu_modes); i++) { + if (mode == pmu_modes[i].mode) + return sprintf(buffer, "%s\n", pmu_modes[i].name); + } + + return -EINVAL; +} +HYPERVISOR_ATTR_RW(pmu_mode); + +static ssize_t pmu_features_store(struct hyp_sysfs_attr *attr, + const char *buffer, size_t len) +{ + int ret; + uint32_t features; + struct xen_pmu_params xp; + + ret = kstrtou32(buffer, 0, &features); + if (ret) + return ret; + + xp.val = features; + xp.version.maj = XENPMU_VER_MAJ; + xp.version.min = XENPMU_VER_MIN; + ret = HYPERVISOR_xenpmu_op(XENPMU_feature_set, &xp); + if (ret) + return ret; + + return len; +} + +static ssize_t pmu_features_show(struct hyp_sysfs_attr *attr, char *buffer) +{ + int ret; + struct xen_pmu_params xp; + + xp.version.maj = XENPMU_VER_MAJ; + xp.version.min = XENPMU_VER_MIN; + ret = HYPERVISOR_xenpmu_op(XENPMU_feature_get, &xp); + if (ret) + return ret; + + return sprintf(buffer, "0x%x\n", (uint32_t)xp.val); +} +HYPERVISOR_ATTR_RW(pmu_features); + +static struct attribute *xen_pmu_attrs[] = { + &pmu_mode_attr.attr, + &pmu_features_attr.attr, + NULL +}; + +static const struct attribute_group xen_pmu_group = { + .name = "pmu", + .attrs = xen_pmu_attrs, +}; + +static int __init 
xen_pmu_init(void) +{ + return sysfs_create_group(hypervisor_kobj, &xen_pmu_group); +} + +static void xen_pmu_destroy(void) +{ + sysfs_remove_group(hypervisor_kobj, &xen_pmu_group); +} +#endif + static int __init hyper_sysfs_init(void) { int ret; @@ -390,7 +513,15 @@ static int __init hyper_sysfs_init(void) ret = xen_properties_init(); if (ret) goto prop_out; - +#ifdef CONFIG_XEN_HAVE_VPMU + if (xen_initial_domain()) { + ret = xen_pmu_init(); + if (ret) { + xen_properties_destroy(); + goto prop_out; + } + } +#endif goto out; prop_out: @@ -407,6 +538,9 @@ out: static void __exit hyper_sysfs_exit(void) { +#ifdef CONFIG_XEN_HAVE_VPMU + xen_pmu_destroy(); +#endif xen_properties_destroy(); xen_compilation_destroy(); xen_sysfs_uuid_destroy(); diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index 239738f94..945fc4327 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c @@ -129,21 +129,17 @@ static int xen_tmem_new_pool(struct tmem_pool_uuid uuid, /* xen generic tmem ops */ static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid, - u32 index, unsigned long pfn) + u32 index, struct page *page) { - unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn; - return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index, - gmfn, 0, 0, 0); + xen_page_to_gfn(page), 0, 0, 0); } static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid, - u32 index, unsigned long pfn) + u32 index, struct page *page) { - unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn; - return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index, - gmfn, 0, 0, 0); + xen_page_to_gfn(page), 0, 0, 0); } static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index) @@ -173,14 +169,13 @@ static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key, { u32 ind = (u32) index; struct tmem_oid oid = *(struct tmem_oid *)&key; - unsigned long pfn = page_to_pfn(page); if (pool < 0) return; if (ind != index) return; mb(); /* ensure page is quiescent; tmem may address it with an alias */ - (void)xen_tmem_put_page((u32)pool, oid, ind, pfn); + (void)xen_tmem_put_page((u32)pool, oid, ind, page); } static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key, @@ -188,7 +183,6 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key, { u32 ind = (u32) index; struct tmem_oid oid = *(struct tmem_oid *)&key; - unsigned long pfn = page_to_pfn(page); int ret; /* translate return values to linux semantics */ @@ -196,7 +190,7 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key, return -1; if (ind != index) return -1; - ret = xen_tmem_get_page((u32)pool, oid, ind, pfn); + ret = xen_tmem_get_page((u32)pool, oid, ind, page); if (ret == 1) return 0; else @@ -287,7 +281,6 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset, { u64 ind64 = (u64)offset; u32 ind = (u32)offset; - unsigned long pfn = page_to_pfn(page); int pool = tmem_frontswap_poolid; int ret; @@ -296,7 +289,7 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset, if (ind64 != ind) return -1; mb(); /* ensure page is quiescent; tmem may address it with an alias */ - ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn); + ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page); /* translate Xen tmem return values to linux semantics */ if (ret == 1) return 0; @@ -313,7 +306,6 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset, { u64 ind64 = (u64)offset; u32 ind = (u32)offset; - unsigned long pfn = page_to_pfn(page); int pool = 
tmem_frontswap_poolid; int ret; @@ -321,7 +313,7 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset, return -1; if (ind64 != ind) return -1; - ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn); + ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page); /* translate Xen tmem return values to linux semantics */ if (ret == 1) return 0; diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index 59fc190f1..70fa43800 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c @@ -560,11 +560,9 @@ static int __init xen_acpi_processor_init(void) return 0; err_unregister: - for_each_possible_cpu(i) { - struct acpi_processor_performance *perf; - perf = per_cpu_ptr(acpi_perf_data, i); - acpi_processor_unregister_performance(perf, i); - } + for_each_possible_cpu(i) + acpi_processor_unregister_performance(i); + err_out: /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ free_acpi_perf_data(); @@ -579,11 +577,9 @@ static void __exit xen_acpi_processor_exit(void) kfree(acpi_ids_done); kfree(acpi_id_present); kfree(acpi_id_cst_present); - for_each_possible_cpu(i) { - struct acpi_processor_performance *perf; - perf = per_cpu_ptr(acpi_perf_data, i); - acpi_processor_unregister_performance(perf, i); - } + for_each_possible_cpu(i) + acpi_processor_unregister_performance(i); + free_acpi_perf_data(); } diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index e30353575..2ba09c119 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -380,7 +380,7 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, for (i = 0; i < nr_pages; i++) { err = gnttab_grant_foreign_access(dev->otherend_id, - virt_to_mfn(vaddr), 0); + virt_to_gfn(vaddr), 0); if (err < 0) { xenbus_dev_fatal(dev, err, "granting access to ring page"); diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c index b17707ee0..ee6d9efd7 100644 --- a/drivers/xen/xenbus/xenbus_dev_backend.c +++ b/drivers/xen/xenbus/xenbus_dev_backend.c @@ -49,7 +49,7 @@ static long xenbus_alloc(domid_t domid) goto out_err; gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid, - virt_to_mfn(xen_store_interface), 0 /* writable */); + virt_to_gfn(xen_store_interface), 0 /* writable */); arg.dom = DOMID_SELF; arg.remote_dom = domid; diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 4308fb3cf..3cbe0556d 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -75,7 +75,7 @@ EXPORT_SYMBOL_GPL(xen_store_interface); enum xenstore_init xen_store_domain_type; EXPORT_SYMBOL_GPL(xen_store_domain_type); -static unsigned long xen_store_mfn; +static unsigned long xen_store_gfn; static BLOCKING_NOTIFIER_HEAD(xenstore_chain); @@ -711,9 +711,7 @@ static int __init xenstored_local_init(void) if (!page) goto out_err; - xen_store_mfn = xen_start_info->store_mfn = - pfn_to_mfn(virt_to_phys((void *)page) >> - PAGE_SHIFT); + xen_store_gfn = xen_start_info->store_mfn = virt_to_gfn((void *)page); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; @@ -787,12 +785,12 @@ static int __init xenbus_init(void) err = xenstored_local_init(); if (err) goto out_error; - xen_store_interface = mfn_to_virt(xen_store_mfn); + xen_store_interface = gfn_to_virt(xen_store_gfn); break; case XS_PV: xen_store_evtchn = xen_start_info->store_evtchn; - xen_store_mfn = 
xen_start_info->store_mfn; - xen_store_interface = mfn_to_virt(xen_store_mfn); + xen_store_gfn = xen_start_info->store_mfn; + xen_store_interface = gfn_to_virt(xen_store_gfn); break; case XS_HVM: err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); @@ -802,9 +800,9 @@ static int __init xenbus_init(void) err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto out_error; - xen_store_mfn = (unsigned long)v; + xen_store_gfn = (unsigned long)v; xen_store_interface = - xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); + xen_remap(xen_store_gfn << PAGE_SHIFT, PAGE_SIZE); break; default: pr_warn("Xenstore state unknown\n"); diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile index b019865fc..1a83010dd 100644 --- a/drivers/xen/xenfs/Makefile +++ b/drivers/xen/xenfs/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_XENFS) += xenfs.o xenfs-y = super.o xenfs-$(CONFIG_XEN_DOM0) += xenstored.o +xenfs-$(CONFIG_XEN_SYMS) += xensyms.o diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c index 06092e0fe..8559a71f3 100644 --- a/drivers/xen/xenfs/super.c +++ b/drivers/xen/xenfs/super.c @@ -57,6 +57,9 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent) { "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR }, { "xsd_kva", &xsd_kva_file_ops, S_IRUSR|S_IWUSR}, { "xsd_port", &xsd_port_file_ops, S_IRUSR|S_IWUSR}, +#ifdef CONFIG_XEN_SYMS + { "xensyms", &xensyms_ops, S_IRUSR}, +#endif {""}, }; diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h index 6b80c7779..2c5934ea9 100644 --- a/drivers/xen/xenfs/xenfs.h +++ b/drivers/xen/xenfs/xenfs.h @@ -3,5 +3,6 @@ extern const struct file_operations xsd_kva_file_ops; extern const struct file_operations xsd_port_file_ops; +extern const struct file_operations xensyms_ops; #endif /* _XENFS_XENBUS_H */ diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c new file mode 100644 index 000000000..f8b128567 --- /dev/null +++ b/drivers/xen/xenfs/xensyms.c @@ -0,0 +1,152 @@ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/seq_file.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/proc_fs.h> +#include <linux/slab.h> +#include <xen/interface/platform.h> +#include <asm/xen/hypercall.h> +#include <xen/xen-ops.h> +#include "xenfs.h" + + +#define XEN_KSYM_NAME_LEN 127 /* Hypervisor may have different name length */ + +struct xensyms { + struct xen_platform_op op; + char *name; + uint32_t namelen; +}; + +/* Grab next output page from the hypervisor */ +static int xensyms_next_sym(struct xensyms *xs) +{ + int ret; + struct xenpf_symdata *symdata = &xs->op.u.symdata; + uint64_t symnum; + + memset(xs->name, 0, xs->namelen); + symdata->namelen = xs->namelen; + + symnum = symdata->symnum; + + ret = HYPERVISOR_dom0_op(&xs->op); + if (ret < 0) + return ret; + + /* + * If hypervisor's symbol didn't fit into the buffer then allocate + * a larger buffer and try again. 
+ */ + if (unlikely(symdata->namelen > xs->namelen)) { + kfree(xs->name); + + xs->namelen = symdata->namelen; + xs->name = kzalloc(xs->namelen, GFP_KERNEL); + if (!xs->name) + return -ENOMEM; + + set_xen_guest_handle(symdata->name, xs->name); + symdata->symnum--; /* Rewind */ + + ret = HYPERVISOR_dom0_op(&xs->op); + if (ret < 0) + return ret; + } + + if (symdata->symnum == symnum) + /* End of symbols */ + return 1; + + return 0; +} + +static void *xensyms_start(struct seq_file *m, loff_t *pos) +{ + struct xensyms *xs = (struct xensyms *)m->private; + + xs->op.u.symdata.symnum = *pos; + + if (xensyms_next_sym(xs)) + return NULL; + + return m->private; +} + +static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos) +{ + struct xensyms *xs = (struct xensyms *)m->private; + + xs->op.u.symdata.symnum = ++(*pos); + + if (xensyms_next_sym(xs)) + return NULL; + + return p; +} + +static int xensyms_show(struct seq_file *m, void *p) +{ + struct xensyms *xs = (struct xensyms *)m->private; + struct xenpf_symdata *symdata = &xs->op.u.symdata; + + seq_printf(m, "%016llx %c %s\n", symdata->address, + symdata->type, xs->name); + + return 0; +} + +static void xensyms_stop(struct seq_file *m, void *p) +{ +} + +static const struct seq_operations xensyms_seq_ops = { + .start = xensyms_start, + .next = xensyms_next, + .show = xensyms_show, + .stop = xensyms_stop, +}; + +static int xensyms_open(struct inode *inode, struct file *file) +{ + struct seq_file *m; + struct xensyms *xs; + int ret; + + ret = seq_open_private(file, &xensyms_seq_ops, + sizeof(struct xensyms)); + if (ret) + return ret; + + m = file->private_data; + xs = (struct xensyms *)m->private; + + xs->namelen = XEN_KSYM_NAME_LEN + 1; + xs->name = kzalloc(xs->namelen, GFP_KERNEL); + if (!xs->name) { + seq_release_private(inode, file); + return -ENOMEM; + } + set_xen_guest_handle(xs->op.u.symdata.name, xs->name); + xs->op.cmd = XENPF_get_symbol; + xs->op.u.symdata.namelen = xs->namelen; + + return 0; +} + +static int xensyms_release(struct inode *inode, struct file *file) +{ + struct seq_file *m = file->private_data; + struct xensyms *xs = (struct xensyms *)m->private; + + kfree(xs->name); + return seq_release_private(inode, file); +} + +const struct file_operations xensyms_ops = { + .open = xensyms_open, + .read = seq_read, + .llseek = seq_lseek, + .release = xensyms_release +}; diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c index 58a5389ae..cff23872c 100644 --- a/drivers/xen/xlate_mmu.c +++ b/drivers/xen/xlate_mmu.c @@ -38,8 +38,8 @@ #include <xen/interface/xen.h> #include <xen/interface/memory.h> -/* map fgmfn of domid to lpfn in the current domain */ -static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn, +/* map fgfn of domid to lpfn in the current domain */ +static int map_foreign_page(unsigned long lpfn, unsigned long fgfn, unsigned int domid) { int rc; @@ -49,7 +49,7 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn, .size = 1, .space = XENMAPSPACE_gmfn_foreign, }; - xen_ulong_t idx = fgmfn; + xen_ulong_t idx = fgfn; xen_pfn_t gpfn = lpfn; int err = 0; @@ -62,13 +62,13 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn, } struct remap_data { - xen_pfn_t *fgmfn; /* foreign domain's gmfn */ + xen_pfn_t *fgfn; /* foreign domain's gfn */ pgprot_t prot; domid_t domid; struct vm_area_struct *vma; int index; struct page **pages; - struct xen_remap_mfn_info *info; + struct xen_remap_gfn_info *info; int *err_ptr; int mapped; }; @@ -82,20 +82,20 @@ static int 
remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); int rc; - rc = map_foreign_page(pfn, *info->fgmfn, info->domid); + rc = map_foreign_page(pfn, *info->fgfn, info->domid); *info->err_ptr++ = rc; if (!rc) { set_pte_at(info->vma->vm_mm, addr, ptep, pte); info->mapped++; } - info->fgmfn++; + info->fgfn++; return 0; } int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, unsigned long addr, - xen_pfn_t *mfn, int nr, + xen_pfn_t *gfn, int nr, int *err_ptr, pgprot_t prot, unsigned domid, struct page **pages) @@ -108,7 +108,7 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, x86 PVOPS */ BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); - data.fgmfn = mfn; + data.fgfn = gfn; data.prot = prot; data.domid = domid; data.vma = vma; |
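The CONFIG_XEN_SYMS code added above exports hypervisor symbols through /proc/xen/xensyms in the same "address type name" format as /proc/kallsyms. The userspace sketch below is illustrative only and not part of this commit; it assumes a dom0 kernel built with CONFIG_XEN_SYMS and xenfs mounted at /proc/xen.

/*
 * Minimal reader for /proc/xen/xensyms (illustrative sketch, not part of
 * this diff). Assumes CONFIG_XEN_SYMS=y and xenfs mounted at /proc/xen.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/xen/xensyms", "r");
	char line[256];
	int shown = 0;

	if (!f) {
		perror("fopen /proc/xen/xensyms");
		return EXIT_FAILURE;
	}

	/* Each line has the form "<address> <type> <name>", as in /proc/kallsyms. */
	while (shown < 10 && fgets(line, sizeof(line), f)) {
		fputs(line, stdout);	/* print the first ten hypervisor symbols */
		shown++;
	}

	fclose(f);
	return EXIT_SUCCESS;
}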