diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 97cbcb7b016aa1e4f489a5841c513b29c716f109..f763b30f98d9643ff8f3531ec6e783b7132f01fa 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -505,6 +505,7 @@ struct i915_psr {
 	ktime_t last_entry_attempt;
 	ktime_t last_exit;
 	bool sink_not_reliable;
+	bool irq_aux_error;
 };
 
 enum intel_pch {
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index ab527f9a5436537fb6b9d9d4f59fe02db9640d0d..f0bfe0a5ff7ce810738c3fed5349bb9f8a97ebf4 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -169,6 +169,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 	u32 transcoders = BIT(TRANSCODER_EDP);
 	enum transcoder cpu_transcoder;
 	ktime_t time_ns =  ktime_get();
+	u32 mask = 0;
 
 	if (INTEL_GEN(dev_priv) >= 8)
 		transcoders |= BIT(TRANSCODER_A) |
@@ -178,10 +179,22 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
 		int shift = edp_psr_shift(cpu_transcoder);
 
-		/* FIXME: Exit PSR and link train manually when this happens. */
-		if (psr_iir & EDP_PSR_ERROR(shift))
-			DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
-				      transcoder_name(cpu_transcoder));
+		if (psr_iir & EDP_PSR_ERROR(shift)) {
+			DRM_WARN("[transcoder %s] PSR aux error\n",
+				 transcoder_name(cpu_transcoder));
+
+			dev_priv->psr.irq_aux_error = true;
+
+			/*
+			 * If this interrupt is not masked it will keep
+			 * firing so fast that it prevents the scheduled
+			 * work from running.
+			 * Also, after a PSR error we don't want to arm PSR
+			 * again, so there is no need to unmask the interrupt
+			 * or clear irq_aux_error.
+			 */
+			mask |= EDP_PSR_ERROR(shift);
+		}
 
 		if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
 			dev_priv->psr.last_entry_attempt = time_ns;
@@ -203,6 +216,13 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 			}
 		}
 	}
+
+	if (mask) {
+		mask |= I915_READ(EDP_PSR_IMR);
+		I915_WRITE(EDP_PSR_IMR, mask);
+
+		schedule_work(&dev_priv->psr.work);
+	}
 }
 
 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
@@ -938,6 +958,16 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
 	return ret;
 }
 
+static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+{
+	struct i915_psr *psr = &dev_priv->psr;
+
+	intel_psr_disable_locked(psr->dp);
+	psr->sink_not_reliable = true;
+	/* let's make sure that sink is awake */
+	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+}
+
 static void intel_psr_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
@@ -948,6 +978,9 @@ static void intel_psr_work(struct work_struct *work)
 	if (!dev_priv->psr.enabled)
 		goto unlock;
 
+	if (READ_ONCE(dev_priv->psr.irq_aux_error))
+		intel_psr_handle_irq(dev_priv);
+
 	/*
 	 * We have to make sure PSR is ready for re-enable
 	 * otherwise it keeps disabled until next full enable/disable cycle.