author     André Fabian Silva Delgado <emulatorman@parabola.nu>    2016-01-20 14:01:31 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>    2016-01-20 14:01:31 -0300
commit     b4b7ff4b08e691656c9d77c758fc355833128ac0 (patch)
tree       82fcb00e6b918026dc9f2d1f05ed8eee83874cc0 /arch/arm/xen
parent     35acfa0fc609f2a2cd95cef4a6a9c3a5c38f1778 (diff)
Linux-libre 4.4-gnupck-4.4-gnu
Diffstat (limited to 'arch/arm/xen')
-rw-r--r--  arch/arm/xen/enlighten.c  20
-rw-r--r--  arch/arm/xen/mm.c         41
-rw-r--r--  arch/arm/xen/p2m.c         6
3 files changed, 49 insertions, 18 deletions
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index eeeab074e..fc7ea529f 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -86,16 +86,25 @@ static void xen_percpu_init(void)
 	int err;
 	int cpu = get_cpu();
 
+	/*
+	 * VCPUOP_register_vcpu_info cannot be called twice for the same
+	 * vcpu, so if vcpu_info is already registered, just get out. This
+	 * can happen with cpu-hotplug.
+	 */
+	if (per_cpu(xen_vcpu, cpu) != NULL)
+		goto after_register_vcpu_info;
+
 	pr_info("Xen: initializing cpu%d\n", cpu);
 	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
 
-	info.mfn = __pa(vcpup) >> PAGE_SHIFT;
-	info.offset = offset_in_page(vcpup);
+	info.mfn = virt_to_gfn(vcpup);
+	info.offset = xen_offset_in_page(vcpup);
 
 	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
 	BUG_ON(err);
 	per_cpu(xen_vcpu, cpu) = vcpup;
 
+after_register_vcpu_info:
 	enable_percpu_irq(xen_events_irq, 0);
 	put_cpu();
 }
@@ -124,6 +133,9 @@ static int xen_cpu_notification(struct notifier_block *self,
 	case CPU_STARTING:
 		xen_percpu_init();
 		break;
+	case CPU_DYING:
+		disable_percpu_irq(xen_events_irq);
+		break;
 	default:
 		break;
 	}
@@ -213,7 +225,7 @@ static int __init xen_guest_init(void)
 	xatp.domid = DOMID_SELF;
 	xatp.idx = 0;
 	xatp.space = XENMAPSPACE_shared_info;
-	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
+	xatp.gpfn = virt_to_gfn(shared_info_page);
 	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
 		BUG();
 
@@ -284,7 +296,7 @@
 void xen_arch_resume(void) { }
 void xen_arch_suspend(void) { }
 
-/* In the hypervisor.S file. */
+/* In the hypercall.S file. */
 EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 6dd911d1f..c5f9a9e3d 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -25,7 +25,7 @@
 unsigned long xen_get_swiotlb_free_pages(unsigned int order)
 {
 	struct memblock_region *reg;
-	gfp_t flags = __GFP_NOWARN;
+	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
 
 	for_each_memblock(memory, reg) {
 		if (reg->base < (phys_addr_t)0xffffffff) {
@@ -48,22 +48,22 @@ static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
 	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
 {
 	struct gnttab_cache_flush cflush;
-	unsigned long pfn;
+	unsigned long xen_pfn;
 	size_t left = size;
 
-	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
-	offset %= PAGE_SIZE;
+	xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
+	offset %= XEN_PAGE_SIZE;
 
 	do {
 		size_t len = left;
 
 		/* buffers in highmem or foreign pages cannot cross page
 		 * boundaries */
-		if (len + offset > PAGE_SIZE)
-			len = PAGE_SIZE - offset;
+		if (len + offset > XEN_PAGE_SIZE)
+			len = XEN_PAGE_SIZE - offset;
 
 		cflush.op = 0;
-		cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
+		cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
 		cflush.offset = offset;
 		cflush.length = len;
 
@@ -79,7 +79,7 @@ static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
 			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
 
 		offset = 0;
-		pfn++;
+		xen_pfn++;
 		left -= len;
 	} while (left);
 }
@@ -138,10 +138,29 @@ void __xen_dma_sync_single_for_device(struct device *hwdev,
 }
 
 bool xen_arch_need_swiotlb(struct device *dev,
-			   unsigned long pfn,
-			   unsigned long bfn)
+			   phys_addr_t phys,
+			   dma_addr_t dev_addr)
 {
-	return (!hypercall_cflush && (pfn != bfn) && !is_device_dma_coherent(dev));
+	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
+	unsigned int bfn = XEN_PFN_DOWN(dev_addr);
+
+	/*
+	 * The swiotlb buffer should be used if
+	 *	- Xen doesn't have the cache flush hypercall
+	 *	- The Linux page refers to foreign memory
+	 *	- The device doesn't support coherent DMA request
+	 *
+	 * The Linux page may be spanned acrros multiple Xen page, although
+	 * it's not possible to have a mix of local and foreign Xen page.
+	 * Furthermore, range_straddles_page_boundary is already checking
+	 * if buffer is physically contiguous in the host RAM.
+	 *
+	 * Therefore we only need to check the first Xen page to know if we
+	 * require a bounce buffer because the device doesn't support coherent
+	 * memory and we are not able to flush the cache.
+	 */
+	return (!hypercall_cflush && (xen_pfn != bfn) &&
+		!is_device_dma_coherent(dev));
 }
 
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 887596c67..0ed01f2d5 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -93,8 +93,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 	for (i = 0; i < count; i++) {
 		if (map_ops[i].status)
 			continue;
-		set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
-				    map_ops[i].dev_bus_addr >> PAGE_SHIFT);
+		set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
+				    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT);
 	}
 
 	return 0;
@@ -108,7 +108,7 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 	int i;
 
 	for (i = 0; i < count; i++) {
-		set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
+		set_phys_to_machine(unmap_ops[i].host_addr >> XEN_PAGE_SHIFT,
 				    INVALID_P2M_ENTRY);
 	}
 
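The common thread in this diff is the switch from the Linux page granularity (PAGE_SHIFT/PAGE_SIZE, which can be 64 KB on some arm64 configurations) to the fixed 4 KB Xen granularity (XEN_PAGE_SHIFT/XEN_PAGE_SIZE, virt_to_gfn, XEN_PFN_DOWN) whenever an address is handed to the hypervisor. The sketch below is not kernel code; it is a standalone userspace model of that arithmetic, with assumed constants (PAGE_SHIFT = 16, XEN_PAGE_SHIFT = 12), an arbitrary example address, and a hypothetical xen_pfn_down() helper standing in for XEN_PFN_DOWN().

```c
/*
 * Standalone sketch (not kernel code) of the Linux-page vs. Xen-page
 * arithmetic this patch moves to. Constants are assumptions for
 * illustration: a 64 KB Linux page and the fixed 4 KB Xen frame.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      16                      /* assumed 64 KB Linux page */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define XEN_PAGE_SHIFT  12                      /* Xen always uses 4 KB frames */
#define XEN_PAGE_SIZE   (1UL << XEN_PAGE_SHIFT)

/* Hypothetical stand-in for XEN_PFN_DOWN(): address -> 4 KB Xen frame number */
static uint64_t xen_pfn_down(uint64_t addr)
{
	return addr >> XEN_PAGE_SHIFT;
}

int main(void)
{
	/* An arbitrary bus address and an offset into the mapping. */
	uint64_t handle = 0x40010000ULL;
	uint64_t offset = 0x2345;

	/*
	 * Same computation as the patched dma_cache_maint(): index the
	 * 4 KB Xen frame rather than the (possibly 64 KB) Linux page.
	 */
	uint64_t xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
	offset %= XEN_PAGE_SIZE;

	printf("Linux pages are %lu KB, Xen frames are %lu KB\n",
	       PAGE_SIZE / 1024, XEN_PAGE_SIZE / 1024);
	printf("one Linux page spans %lu Xen frames\n",
	       PAGE_SIZE / XEN_PAGE_SIZE);
	printf("xen_pfn = %#llx, offset within Xen frame = %#llx\n",
	       (unsigned long long)xen_pfn, (unsigned long long)offset);
	printf("xen_pfn_down(handle) = %#llx\n",
	       (unsigned long long)xen_pfn_down(handle));
	return 0;
}
```

On a 4 KB-page kernel the two granularities coincide, which is why the old PAGE_SHIFT-based code happened to work; making the Xen granularity explicit lets 64 KB-page kernels index the correct 4 KB frame, and, as the comment in xen_arch_need_swiotlb() notes, checking the first Xen frame is enough because one Linux page cannot mix local and foreign Xen frames.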