From 03dd4cb26d967f9588437b0fc9cc0e8353322bb7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9=20Fabian=20Silva=20Delgado?=
Date: Fri, 25 Mar 2016 03:53:42 -0300
Subject: Linux-libre 4.5-gnu

---
 drivers/gpu/drm/i915/i915_gem.c | 165 ++++++++++++++++++++++++----------------
 1 file changed, 98 insertions(+), 67 deletions(-)

(limited to 'drivers/gpu/drm/i915/i915_gem.c')

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f56af0aaa..bb44bad15 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2797,6 +2797,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 					struct intel_engine_cs *ring)
 {
+	struct intel_ringbuffer *buffer;
+
 	while (!list_empty(&ring->active_list)) {
 		struct drm_i915_gem_object *obj;
 
@@ -2812,18 +2814,16 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 * are the ones that keep the context and ringbuffer backing objects
 	 * pinned in place.
 	 */
-	while (!list_empty(&ring->execlist_queue)) {
-		struct drm_i915_gem_request *submit_req;
 
-		submit_req = list_first_entry(&ring->execlist_queue,
-				struct drm_i915_gem_request,
-				execlist_link);
-		list_del(&submit_req->execlist_link);
+	if (i915.enable_execlists) {
+		spin_lock_irq(&ring->execlist_lock);
 
-		if (submit_req->ctx != ring->default_context)
-			intel_lr_context_unpin(submit_req);
+		/* list_splice_tail_init checks for empty lists */
+		list_splice_tail_init(&ring->execlist_queue,
+				      &ring->execlist_retired_req_list);
 
-		i915_gem_request_unreference(submit_req);
+		spin_unlock_irq(&ring->execlist_lock);
+		intel_execlists_retire_requests(ring);
 	}
 
 	/*
@@ -2842,6 +2842,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 
 		i915_gem_request_retire(request);
 	}
+
+	/* Having flushed all requests from all queues, we know that all
+	 * ringbuffers must now be empty. However, since we do not reclaim
+	 * all space when retiring the request (to prevent HEADs colliding
+	 * with rapid ringbuffer wraparound) the amount of available space
+	 * upon reset is less than when we start. Do one more pass over
+	 * all the ringbuffers to reset last_retired_head.
+	 */
+	list_for_each_entry(buffer, &ring->buffers, link) {
+		buffer->last_retired_head = buffer->tail;
+		intel_ring_update_space(buffer);
+	}
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -2982,6 +2994,10 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	if (!list_empty(&ring->request_list))
 		return;
 
+	/* we probably should sync with hangcheck here, using cancel_work_sync.
+	 * Also locking seems to be fubar here, ring->request_list is protected
+	 * by dev->struct_mutex. */
+
 	intel_mark_idle(dev);
 
 	if (mutex_trylock(&dev->struct_mutex)) {
@@ -3106,7 +3122,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		if (ret == 0)
 			ret = __i915_wait_request(req[i], reset_counter, true,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
-						  file->driver_priv);
+						  to_rps_client(file));
 		i915_gem_request_unreference__unlocked(req[i]);
 	}
 	return ret;
@@ -3472,7 +3488,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	if (flags & PIN_MAPPABLE)
 		end = min_t(u64, end, dev_priv->gtt.mappable_end);
 	if (flags & PIN_ZONE_4G)
-		end = min_t(u64, end, (1ULL << 32));
+		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
 
 	if (alignment == 0)
 		alignment = flags & PIN_MAPPABLE ? fence_alignment :
@@ -3509,30 +3525,50 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	if (IS_ERR(vma))
 		goto err_unpin;
 
-	if (flags & PIN_HIGH) {
-		search_flag = DRM_MM_SEARCH_BELOW;
-		alloc_flag = DRM_MM_CREATE_TOP;
+	if (flags & PIN_OFFSET_FIXED) {
+		uint64_t offset = flags & PIN_OFFSET_MASK;
+
+		if (offset & (alignment - 1) || offset + size > end) {
+			ret = -EINVAL;
+			goto err_free_vma;
+		}
+		vma->node.start = offset;
+		vma->node.size = size;
+		vma->node.color = obj->cache_level;
+		ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+		if (ret) {
+			ret = i915_gem_evict_for_vma(vma);
+			if (ret == 0)
+				ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+		}
+		if (ret)
+			goto err_free_vma;
 	} else {
-		search_flag = DRM_MM_SEARCH_DEFAULT;
-		alloc_flag = DRM_MM_CREATE_DEFAULT;
-	}
+		if (flags & PIN_HIGH) {
+			search_flag = DRM_MM_SEARCH_BELOW;
+			alloc_flag = DRM_MM_CREATE_TOP;
+		} else {
+			search_flag = DRM_MM_SEARCH_DEFAULT;
+			alloc_flag = DRM_MM_CREATE_DEFAULT;
+		}
 
 search_free:
-	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
-						  size, alignment,
-						  obj->cache_level,
-						  start, end,
-						  search_flag,
-						  alloc_flag);
-	if (ret) {
-		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level,
-					       start, end,
-					       flags);
-		if (ret == 0)
-			goto search_free;
+		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
+							  size, alignment,
+							  obj->cache_level,
+							  start, end,
+							  search_flag,
+							  alloc_flag);
+		if (ret) {
+			ret = i915_gem_evict_something(dev, vm, size, alignment,
						       obj->cache_level,
						       start, end,
						       flags);
+			if (ret == 0)
+				goto search_free;
 
-		goto err_free_vma;
+			goto err_free_vma;
+		}
 	}
 	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
 		ret = -EINVAL;
@@ -3886,7 +3922,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 		 * cacheline, whereas normally such cachelines would get
 		 * invalidated.
 		 */
-		if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
+		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 			return -ENODEV;
 
 		level = I915_CACHE_LLC;
@@ -3929,17 +3965,11 @@ rpm_put:
 int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
-				     struct intel_engine_cs *pipelined,
-				     struct drm_i915_gem_request **pipelined_request,
 				     const struct i915_ggtt_view *view)
 {
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
-	if (ret)
-		return ret;
-
 	/* Mark the pin_display early so that we account for the
 	 * display coherency whilst setting up the cache domains.
 	 */
@@ -4129,6 +4159,10 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
 	    vma->node.start < (flags & PIN_OFFSET_MASK))
 		return true;
 
+	if (flags & PIN_OFFSET_FIXED &&
+	    vma->node.start != (flags & PIN_OFFSET_MASK))
+		return true;
+
 	return false;
 }
 
@@ -4391,6 +4425,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
 	.get_pages = i915_gem_object_get_pages_gtt,
 	.put_pages = i915_gem_object_put_pages_gtt,
 };
@@ -4541,10 +4576,8 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 {
 	struct i915_vma *vma;
 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm)
+		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
+		    vma->vm == vm)
 			return vma;
 	}
 	return NULL;
@@ -4633,7 +4666,6 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
 	int i, ret;
 
@@ -4649,10 +4681,10 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 	 * here because no other code should access these registers other than
 	 * at initialization time.
 	 */
-	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
+	for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, reg_base + i);
-		intel_ring_emit(ring, remap_info[i/4]);
+		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
+		intel_ring_emit(ring, remap_info[i]);
 	}
 
 	intel_ring_advance(ring);
@@ -4820,18 +4852,9 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (HAS_GUC_UCODE(dev)) {
 		ret = intel_guc_ucode_load(dev);
 		if (ret) {
-			/*
-			 * If we got an error and GuC submission is enabled, map
-			 * the error to -EIO so the GPU will be declared wedged.
-			 * OTOH, if we didn't intend to use the GuC anyway, just
-			 * discard the error and carry on.
-			 */
-			DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
-				  i915.enable_guc_submission ? "" :
-				  " (ignored)");
-			ret = i915.enable_guc_submission ? -EIO : 0;
-			if (ret)
-				goto out;
+			DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
+			ret = -EIO;
+			goto out;
 		}
 	}
 
@@ -4894,14 +4917,6 @@ int i915_gem_init(struct drm_device *dev)
 
 	mutex_lock(&dev->struct_mutex);
 
-	if (IS_VALLEYVIEW(dev)) {
-		/* VLVA0 (potential hack), BIOS isn't actually waking us */
-		I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
-		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
-			      VLV_GTLC_ALLOWWAKEACK), 10))
-			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
-	}
-
 	if (!i915.enable_execlists) {
 		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
 		dev_priv->gt.init_rings = i915_gem_init_rings;
@@ -5019,7 +5034,7 @@ i915_gem_load(struct drm_device *dev)
 	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
+	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
 		dev_priv->num_fence_regs = 32;
 	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) ||
 		 IS_I945GM(dev) || IS_G33(dev))
 		dev_priv->num_fence_regs = 16;
@@ -5240,6 +5255,21 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 	return false;
 }
 
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
+{
+	struct page *page;
+
+	/* Only default objects have per-page dirty tracking */
+	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+		return NULL;
+
+	page = i915_gem_object_get_page(obj, n);
+	set_page_dirty(page);
+	return page;
+}
+
 /* Allocate a new GEM object and fill it with the supplied data */
 struct drm_i915_gem_object *
 i915_gem_object_create_from_data(struct drm_device *dev,
@@ -5265,6 +5295,7 @@ i915_gem_object_create_from_data(struct drm_device *dev,
 	i915_gem_object_pin_pages(obj);
 	sg = obj->pages;
 	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
+	obj->dirty = 1;		/* Backing store is now out of date */
 	i915_gem_object_unpin_pages(obj);
 
 	if (WARN_ON(bytes != size)) {
-- 
cgit v1.2.3