Diffstat (limited to 'drivers/hv/vmbus_drv.c')
-rw-r--r--  drivers/hv/vmbus_drv.c  150
1 file changed, 109 insertions(+), 41 deletions(-)
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 64713ff47..952f20fdc 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -41,6 +41,7 @@
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
+#include <linux/efi.h>
#include "hyperv_vmbus.h"
static struct acpi_device *hv_acpi_dev;
@@ -101,7 +102,10 @@ static struct notifier_block hyperv_panic_block = {
.notifier_call = hyperv_panic_event,
};
+static const char *fb_mmio_name = "fb_range";
+static struct resource *fb_mmio;
struct resource *hyperv_mmio;
+DEFINE_SEMAPHORE(hyperv_mmio_lock);
static int vmbus_exists(void)
{
@@ -708,7 +712,7 @@ static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
if (dev->event_handler)
dev->event_handler(dev);
- vmbus_signal_eom(msg);
+ vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
}
void vmbus_on_msg_dpc(unsigned long data)
@@ -720,8 +724,9 @@ void vmbus_on_msg_dpc(unsigned long data)
struct vmbus_channel_message_header *hdr;
struct vmbus_channel_message_table_entry *entry;
struct onmessage_work_context *ctx;
+ u32 message_type = msg->header.message_type;
- if (msg->header.message_type == HVMSG_NONE)
+ if (message_type == HVMSG_NONE)
/* no msg */
return;
@@ -746,7 +751,7 @@ void vmbus_on_msg_dpc(unsigned long data)
entry->message_handler(hdr);
msg_handled:
- vmbus_signal_eom(msg);
+ vmbus_signal_eom(msg, message_type);
}
static void vmbus_isr(void)
@@ -1048,7 +1053,6 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
new_res->end = end;
/*
- * Stick ranges from higher in address space at the front of the list.
* If two ranges are adjacent, merge them.
*/
do {
@@ -1069,7 +1073,7 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
break;
}
- if ((*old_res)->end < new_res->start) {
+ if ((*old_res)->start > new_res->end) {
new_res->sibling = *old_res;
if (prev_res)
(*prev_res)->sibling = new_res;
@@ -1091,6 +1095,12 @@ static int vmbus_acpi_remove(struct acpi_device *device)
struct resource *next_res;
if (hyperv_mmio) {
+ if (fb_mmio) {
+ __release_region(hyperv_mmio, fb_mmio->start,
+ resource_size(fb_mmio));
+ fb_mmio = NULL;
+ }
+
for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
next_res = cur_res->sibling;
kfree(cur_res);
@@ -1100,6 +1110,30 @@ static int vmbus_acpi_remove(struct acpi_device *device)
return 0;
}
+static void vmbus_reserve_fb(void)
+{
+ int size;
+ /*
+ * Make a claim for the frame buffer in the resource tree under the
+ * first node, which will be the one below 4GB. The length seems to
+ * be underreported, particularly in a Generation 1 VM. So start out
+ * reserving a larger area and make it smaller until it succeeds.
+ */
+
+ if (screen_info.lfb_base) {
+ if (efi_enabled(EFI_BOOT))
+ size = max_t(__u32, screen_info.lfb_size, 0x800000);
+ else
+ size = max_t(__u32, screen_info.lfb_size, 0x4000000);
+
+ for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
+ fb_mmio = __request_region(hyperv_mmio,
+ screen_info.lfb_base, size,
+ fb_mmio_name, 0);
+ }
+ }
+}
+
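The window claimed by vmbus_reserve_fb() is what fb_overlap_ok callers of vmbus_allocate_mmio() (below) are served from first. A minimal sketch of such a caller, assuming an hv_device *hdev from the driver's probe path and an illustrative 8 MB size with 1 MB alignment (neither taken from this patch):

        struct resource *fb_res;
        int ret;

        /*
         * fb_overlap_ok = true: satisfy the request from the fb_mmio
         * range reserved by vmbus_reserve_fb(), so no extra shadow
         * reservation is taken for it.
         */
        ret = vmbus_allocate_mmio(&fb_res, hdev, 0, -1 /* no upper bound */,
                                  0x800000, 0x100000, true);
        if (ret)
                return ret;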
/**
* vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
* @new: If successful, supplied a pointer to the
@@ -1128,11 +1162,33 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
resource_size_t size, resource_size_t align,
bool fb_overlap_ok)
{
- struct resource *iter;
- resource_size_t range_min, range_max, start, local_min, local_max;
+ struct resource *iter, *shadow;
+ resource_size_t range_min, range_max, start;
const char *dev_n = dev_name(&device_obj->device);
- u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
- int i;
+ int retval;
+
+ retval = -ENXIO;
+ down(&hyperv_mmio_lock);
+
+ /*
+ * If overlaps with frame buffers are allowed, then first attempt to
+ * make the allocation from within the reserved region. Because it
+ * is already reserved, no shadow allocation is necessary.
+ */
+ if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
+ !(max < fb_mmio->start)) {
+
+ range_min = fb_mmio->start;
+ range_max = fb_mmio->end;
+ start = (range_min + align - 1) & ~(align - 1);
+ for (; start + size - 1 <= range_max; start += align) {
+ *new = request_mem_region_exclusive(start, size, dev_n);
+ if (*new) {
+ retval = 0;
+ goto exit;
+ }
+ }
+ }
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= max) || (iter->end <= min))
@@ -1140,46 +1196,56 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
range_min = iter->start;
range_max = iter->end;
-
- /* If this range overlaps the frame buffer, split it into
- two tries. */
- for (i = 0; i < 2; i++) {
- local_min = range_min;
- local_max = range_max;
- if (fb_overlap_ok || (range_min >= fb_end) ||
- (range_max <= screen_info.lfb_base)) {
- i++;
- } else {
- if ((range_min <= screen_info.lfb_base) &&
- (range_max >= screen_info.lfb_base)) {
- /*
- * The frame buffer is in this window,
- * so trim this into the part that
- * preceeds the frame buffer.
- */
- local_max = screen_info.lfb_base - 1;
- range_min = fb_end;
- } else {
- range_min = fb_end;
- continue;
- }
+ start = (range_min + align - 1) & ~(align - 1);
+ for (; start + size - 1 <= range_max; start += align) {
+ shadow = __request_region(iter, start, size, NULL,
+ IORESOURCE_BUSY);
+ if (!shadow)
+ continue;
+
+ *new = request_mem_region_exclusive(start, size, dev_n);
+ if (*new) {
+ shadow->name = (char *)*new;
+ retval = 0;
+ goto exit;
}
- start = (local_min + align - 1) & ~(align - 1);
- for (; start + size - 1 <= local_max; start += align) {
- *new = request_mem_region_exclusive(start, size,
- dev_n);
- if (*new)
- return 0;
- }
+ __release_region(iter, start, size);
}
}
- return -ENXIO;
+exit:
+ up(&hyperv_mmio_lock);
+ return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
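An ordinary caller looks similar but keeps clear of the frame buffer. A minimal sketch, assuming a 1 MB window below 4 GB with 4 KB alignment (all illustrative values, not from this patch):

        struct resource *mmio_res;
        int ret;

        ret = vmbus_allocate_mmio(&mmio_res, hdev,
                                  0, 0xFFFFFFFF,  /* stay below 4 GB */
                                  0x100000,       /* 1 MB window */
                                  0x1000,         /* 4 KB alignment */
                                  false);         /* never overlap the fb */
        if (ret)
                return ret;
        /* mmio_res->start .. mmio_res->end is now exclusively owned */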
/**
+ * vmbus_free_mmio() - Free a memory-mapped I/O range.
+ * @start: Base address of region to release.
+ * @size: Size of the range to be released
+ *
+ * This function releases anything requested by
+ * vmbus_allocate_mmio().
+ */
+void vmbus_free_mmio(resource_size_t start, resource_size_t size)
+{
+ struct resource *iter;
+
+ down(&hyperv_mmio_lock);
+ for (iter = hyperv_mmio; iter; iter = iter->sibling) {
+ if ((iter->start >= start + size) || (iter->end <= start))
+ continue;
+
+ __release_region(iter, start, size);
+ }
+ release_mem_region(start, size);
+ up(&hyperv_mmio_lock);
+
+}
+EXPORT_SYMBOL_GPL(vmbus_free_mmio);
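Every range obtained from vmbus_allocate_mmio() should be returned through vmbus_free_mmio(), which drops both the shadow claim in the hyperv_mmio tree and the exclusive iomem region. Continuing the sketch above (mmio_res is the assumed, previously allocated range):

        /* typically from the driver's remove path */
        vmbus_free_mmio(mmio_res->start, resource_size(mmio_res));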
+
+/**
* vmbus_cpu_number_to_vp_number() - Map CPU to VP.
* @cpu_number: CPU number in Linux terms
*
@@ -1219,8 +1285,10 @@ static int vmbus_acpi_add(struct acpi_device *device)
if (ACPI_FAILURE(result))
continue;
- if (hyperv_mmio)
+ if (hyperv_mmio) {
+ vmbus_reserve_fb();
break;
+ }
}
ret_val = 0;