diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 15440123e38d221c5671c4198b7e0558fae7722a..c5b7b8e0678a354d2a529c53d33c128d722f39c7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1590,9 +1590,7 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	intel_csr_ucode_resume(dev_priv);
 
-	mutex_lock(&dev->struct_mutex);
-	i915_gem_restore_gtt_mappings(dev);
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_resume(dev);
 
 	i915_restore_state(dev);
 	intel_opregion_setup(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cd1ccc47ee8b3133e0c77e64f9e6f7066c9092ae..27d9b2c374b370fbc8b0bda67f1ab220c337c3a0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3384,6 +3384,7 @@ void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
 int __must_check i915_gem_suspend(struct drm_device *dev);
+void i915_gem_resume(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
 			struct drm_i915_gem_object *batch_obj,
 			bool flush_caches);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf0e8aa8035c045140b53aff02c508df647b3dac..4644a7e67cf6bae4fb8c5b2e84ff7962bf17f5f0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4983,12 +4983,30 @@ i915_gem_suspend(struct drm_device *dev)
 	intel_suspend_gt_powersave(dev_priv);
 
 	mutex_lock(&dev->struct_mutex);
+
+	/* We have to flush all the executing contexts to main memory so
+	 * that they can be saved in the hibernation image. To ensure the last
+	 * context image is coherent, we have to switch away from it. That
+	 * leaves the dev_priv->kernel_context still active when
+	 * we actually suspend, and its image in memory may not match the GPU
+	 * state. Fortunately, the kernel_context is disposable and we do
+	 * not rely on its state.
+	 */
+	ret = i915_gem_switch_to_kernel_context(dev_priv);
+	if (ret)
+		goto err;
+
 	ret = i915_gem_wait_for_idle(dev_priv);
 	if (ret)
 		goto err;
 
 	i915_gem_retire_requests(dev_priv);
 
+	/* Note that rather than stopping the engines, all we have to do
+	 * is assert that every RING_HEAD == RING_TAIL (all execution is
+	 * complete) and similarly for all logical context images (to
+	 * ensure they are all ready for hibernation).
+	 */
 	i915_gem_stop_engines(dev);
 	i915_gem_context_lost(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
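The first comment above is the crux of the patch: the hardware only writes a
logical context image back to memory when it switches away from that context,
so making every user context coherent for the hibernation image means queuing
one switch per engine into the disposable kernel_context. As a rough sketch of
that idea (a sketch only, with helper signatures approximated from this era of
the driver, not copied from i915_gem_context.c):

	/* Sketch: submit an empty request on each engine that runs in the
	 * kernel context, forcing the hardware to save every other context
	 * image out to memory on the context switch.
	 */
	static int switch_to_kernel_context_sketch(struct drm_i915_private *dev_priv)
	{
		struct intel_engine_cs *engine;

		for_each_engine(engine, dev_priv) {
			struct drm_i915_gem_request *req;

			req = i915_gem_request_alloc(engine,
						     dev_priv->kernel_context);
			if (IS_ERR(req))
				return PTR_ERR(req);

			/* no commands needed; the switch itself is the point */
			i915_add_request_no_flush(req);
		}

		return 0;
	}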
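Likewise, the second comment describes the invariant that makes stopping the
engines redundant on this path: once the switch into the kernel context has
been waited upon, every ring must already be drained. A hypothetical debug
assertion capturing that invariant could look like the following
(RING_HEAD/RING_TAIL are the real register macros, but the helper itself is
illustrative and ignores the non-offset bits of the head register):

	static void assert_engines_idle_sketch(struct drm_i915_private *dev_priv)
	{
		struct intel_engine_cs *engine;

		/* all execution complete: nothing left between head and tail */
		for_each_engine(engine, dev_priv)
			WARN_ON(I915_READ(RING_HEAD(engine->mmio_base)) !=
				I915_READ(RING_TAIL(engine->mmio_base)));
	}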
@@ -5009,6 +5027,23 @@ i915_gem_suspend(struct drm_device *dev)
 	return ret;
 }
 
+void i915_gem_resume(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_restore_gtt_mappings(dev);
+
+	/* As we didn't flush the kernel context before suspend, we cannot
+	 * guarantee that the context image is complete. So let's just reset
+	 * it and start again.
+	 */
+	if (i915.enable_execlists)
+		intel_lr_context_reset(dev_priv, dev_priv->kernel_context);
+
+	mutex_unlock(&dev->struct_mutex);
+}
+
 void i915_gem_init_swizzling(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
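On the resume side, "reset it and start again" means rewriting the ring state
stored inside each engine's logical context image so that the possibly stale
head/tail saved across hibernation are discarded. A simplified sketch of the
effect of intel_lr_context_reset() on the image (the map/unmap helpers below
are hypothetical stand-ins for the kmap of the context object;
CTX_RING_HEAD/CTX_RING_TAIL are the real register-state indices):

	static void reset_context_image_sketch(struct drm_i915_private *dev_priv,
					       struct i915_gem_context *ctx)
	{
		struct intel_engine_cs *engine;

		for_each_engine(engine, dev_priv) {
			struct intel_ringbuffer *ring =
				ctx->engine[engine->id].ringbuf;
			uint32_t *reg_state;

			if (!ring)
				continue;

			/* hypothetical helper: kmap the context object */
			reg_state = map_context_image(ctx, engine);

			/* restart execution from a known-empty ring */
			reg_state[CTX_RING_HEAD + 1] = 0;
			reg_state[CTX_RING_TAIL + 1] = 0;

			unmap_context_image(ctx, engine); /* hypothetical */

			ring->head = 0;
			ring->tail = 0;
		}
	}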