diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6b3142a99171ae4b3bdbed4e0cf402c2e8d68eee..1b539057b2646bb99dce5791db575e5c1e020b73 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2821,7 +2821,6 @@ static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
 
 void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
-int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
@@ -2843,6 +2842,9 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 int __must_check i915_wait_request(struct drm_i915_gem_request *req);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly);
+int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
 				  bool write);
 int __must_check
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fb48e72772528e6ac735401f5ff0f24a03f63bef..5ff96f94c2ee6dfe62da0026522e46f775bc8992 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -40,9 +40,6 @@ static void
 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void
 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-			       bool readonly);
 static void
 i915_gem_object_retire(struct drm_i915_gem_object *obj);
 
@@ -1397,7 +1394,7 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
  */
-static __must_check int
+int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly)
 {
@@ -3078,7 +3075,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	BUG_ON(obj->pages == NULL);
 
-	ret = i915_gem_object_finish_gpu(obj);
+	ret = i915_gem_object_wait_rendering(obj, false);
 	if (ret)
 		return ret;
 	/* Continue on if we fail due to EIO, the GPU is hung so we
@@ -3853,7 +3850,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	}
 
 	if (i915_gem_obj_bound_any(obj)) {
-		ret = i915_gem_object_finish_gpu(obj);
+		ret = i915_gem_object_wait_rendering(obj, false);
 		if (ret)
 			return ret;
 
@@ -4044,23 +4041,6 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
 	obj->pin_display--;
 }
 
-int
-i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
-{
-	int ret;
-
-	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
-		return 0;
-
-	ret = i915_gem_object_wait_rendering(obj, false);
-	if (ret)
-		return ret;
-
-	/* Ensure that we invalidate the GPU's caches and TLBs. */
-	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
-	return 0;
-}
-
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index da64f0ed7b93482d811d343611810a98ecc2b576..c890e03939fac03058ab60ff8225c8ee09e240ff 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3295,27 +3295,30 @@ void intel_finish_reset(struct drm_device *dev)
 	drm_modeset_unlock_all(dev);
 }
 
-static int
+static void
 intel_finish_fb(struct drm_framebuffer *old_fb)
 {
 	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	bool was_interruptible = dev_priv->mm.interruptible;
 	int ret;
 
 	/* Big Hammer, we also need to ensure that any pending
 	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 	 * current scanout is retired before unpinning the old
-	 * framebuffer.
+	 * framebuffer. Note that we rely on userspace rendering
+	 * into the buffer attached to the pipe it is waiting
+	 * on. If not, userspace generates a GPU hang with IPEHR
+	 * pointing to the MI_WAIT_FOR_EVENT.
 	 *
 	 * This should only fail upon a hung GPU, in which case we
 	 * can safely continue.
 	 */
 	dev_priv->mm.interruptible = false;
-	ret = i915_gem_object_finish_gpu(obj);
+	ret = i915_gem_object_wait_rendering(obj, true);
 	dev_priv->mm.interruptible = was_interruptible;
 
-	return ret;
+	WARN_ON(ret);
 }
 
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
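
For reference, the readonly flag at the three converted call sites is chosen deliberately: intel_finish_fb() passes true because the display only needs rendering *into* the old framebuffer to have completed before it is unpinned, while i915_vma_unbind() and i915_gem_object_set_cache_level() pass false because the object must be completely idle. Below is a minimal sketch of the helper being exported here, assuming the single last_read_req/last_write_req tracking that struct drm_i915_gem_object carries at this point; the body is untouched by this patch, so treat it as illustrative rather than part of the diff:

int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct drm_i915_gem_request *req;
	int ret;

	/* A readonly waiter only needs outstanding writes to the
	 * object to finish; a read/write waiter needs the object
	 * to be completely idle.
	 */
	req = readonly ? obj->last_write_req : obj->last_read_req;
	if (req == NULL)
		return 0;

	ret = i915_wait_request(req);
	if (ret)
		return ret;

	/* Retire the completed request and update the object's
	 * active state.
	 */
	return i915_gem_object_wait_rendering__tail(obj);
}

Note also what the conversion drops: the removed i915_gem_object_finish_gpu() additionally cleared I915_GEM_GPU_DOMAINS from obj->base.read_domains after waiting, whereas the callers above now presumably rely on the normal set-domain paths for any cache invalidation.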