Commit cd17ef41 authored by Dave Airlie

Merge tag 'drm-intel-next-2013-02-01' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
"Probably the last feature pull for 3.9, there's some fixes outstanding
thought that I'd like to sneak in. And maybe 3.8 takes a bit longer ...
Anyway, highlights of this pull:
- Kill the horrible IS_DISPLAYREG hack to handle the mmio offset movements
  on vlv, big thanks to Ville.
- Dynamic power well support for Haswell, shaves away a bit when only
  using the eDP port on pipe A (Paulo). Plus unclaimed register fixes
  uncovered by this.
- Clarifications of the gpu hang/reset state transitions, hopefully fixing
  a few spurious -EIO deaths in userspace.
- Haswell ELD fixes.
- Some more (pp)gtt cleanups from Ben.
- A few smaller things all over.

Plus all the stuff from the previous rather small pull request:
- Broadcast RGB improvements and reduced color range fixes from Ville.
- Ben is on a "kill legacy gtt code for good" spree, first pile of patches
  included.
- No-relocs and bo lut improvements for faster execbuf from Chris.
- Some refactorings from Imre."

* tag 'drm-intel-next-2013-02-01' of git://people.freedesktop.org/~danvet/drm-intel: (101 commits)
  GPU/i915: Fix acpi_bus_get_device() check in drivers/gpu/drm/i915/intel_opregion.c
  drm/i915: Set the SR01 "screen off" bit in i915_redisable_vga() too
  drm/i915: Kill IS_DISPLAYREG()
  drm/i915: Introduce i915_vgacntrl_reg()
  drm/i915: gen6_gmch_remove can be static
  drm/i915: dynamic Haswell display power well support
  drm/i915: check the power down well on assert_pipe()
  drm/i915: don't send DP "idle" pattern before "normal" on HSW PORT_A
  drm/i915: don't run hsw power well code on !hsw
  drm/i915: kill cargo-culted locking from power well code
  drm/i915: Only run idle processing from i915_gem_retire_requests_worker
  drm/i915: Fix CAGF for HSW
  drm/i915: Reclaim GTT space for failed PPGTT
  drm/i915: remove intel_gtt structure
  drm/i915: Add probe and remove to the gtt ops
  drm/i915: extract hw ppgtt setup/cleanup code
  drm/i915: pte_encode is gen6+
  drm/i915: vfuncs for ppgtt
  drm/i915: vfuncs for gtt_clear_range/insert_entries
  drm/i915: Error state should print /sys/kernel/debug
  ...
parents 67c96400 7d37beaa
@@ -60,7 +60,6 @@ struct intel_gtt_driver {
 };
 static struct _intel_private {
-	struct intel_gtt base;
 	const struct intel_gtt_driver *driver;
 	struct pci_dev *pcidev;	/* device one */
 	struct pci_dev *bridge_dev;
@@ -75,7 +74,18 @@ static struct _intel_private {
 	struct resource ifp_resource;
 	int resource_valid;
 	struct page *scratch_page;
+	phys_addr_t scratch_page_dma;
 	int refcount;
+	/* Whether i915 needs to use the dmar apis or not. */
+	unsigned int needs_dmar : 1;
+	phys_addr_t gma_bus_addr;
+	/*  Size of memory reserved for graphics by the BIOS */
+	unsigned int stolen_size;
+	/* Total number of gtt entries. */
+	unsigned int gtt_total_entries;
+	/* Part of the gtt that is mappable by the cpu, for those chips where
+	 * this is not the full gtt. */
+	unsigned int gtt_mappable_entries;
 } intel_private;
 #define INTEL_GTT_GEN	intel_private.driver->gen
@@ -291,15 +301,15 @@ static int intel_gtt_setup_scratch_page(void)
 	get_page(page);
 	set_pages_uc(page, 1);
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
 				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
 			return -EINVAL;
-		intel_private.base.scratch_page_dma = dma_addr;
+		intel_private.scratch_page_dma = dma_addr;
 	} else
-		intel_private.base.scratch_page_dma = page_to_phys(page);
+		intel_private.scratch_page_dma = page_to_phys(page);
 	intel_private.scratch_page = page;
@@ -506,7 +516,7 @@ static unsigned int intel_gtt_total_entries(void)
 		/* On previous hardware, the GTT size was just what was
 		 * required to map the aperture.
 		 */
-		return intel_private.base.gtt_mappable_entries;
+		return intel_private.gtt_mappable_entries;
 	}
 }
@@ -546,7 +556,7 @@ static unsigned int intel_gtt_mappable_entries(void)
 static void intel_gtt_teardown_scratch_page(void)
 {
 	set_pages_wb(intel_private.scratch_page, 1);
-	pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
+	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
 		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 	put_page(intel_private.scratch_page);
 	__free_page(intel_private.scratch_page);
@@ -572,8 +582,8 @@ static int intel_gtt_init(void)
 	if (ret != 0)
 		return ret;
-	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
-	intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
+	intel_private.gtt_total_entries = intel_gtt_total_entries();
 	/* save the PGETBL reg for resume */
 	intel_private.PGETBL_save =
@@ -585,10 +595,10 @@ static int intel_gtt_init(void)
 	dev_info(&intel_private.bridge_dev->dev,
 		 "detected gtt size: %dK total, %dK mappable\n",
-		 intel_private.base.gtt_total_entries * 4,
-		 intel_private.base.gtt_mappable_entries * 4);
+		 intel_private.gtt_total_entries * 4,
+		 intel_private.gtt_mappable_entries * 4);
-	gtt_map_size = intel_private.base.gtt_total_entries * 4;
+	gtt_map_size = intel_private.gtt_total_entries * 4;
 	intel_private.gtt = NULL;
 	if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
@@ -605,9 +615,9 @@ static int intel_gtt_init(void)
 	global_cache_flush();   /* FIXME: ? */
-	intel_private.base.stolen_size = intel_gtt_stolen_size();
+	intel_private.stolen_size = intel_gtt_stolen_size();
-	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
 	ret = intel_gtt_setup_scratch_page();
 	if (ret != 0) {
@@ -622,7 +632,7 @@ static int intel_gtt_init(void)
 	pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
 			      &gma_addr);
-	intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 	return 0;
 }
@@ -633,8 +643,7 @@ static int intel_fake_agp_fetch_size(void)
 	unsigned int aper_size;
 	int i;
-	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
-		    / MB(1);
+	aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
 	for (i = 0; i < num_sizes; i++) {
 		if (aper_size == intel_fake_agp_sizes[i].size) {
@@ -778,7 +787,7 @@ static int intel_fake_agp_configure(void)
 		return -EIO;
 	intel_private.clear_fake_agp = true;
-	agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
+	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 	return 0;
 }
@@ -840,12 +849,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 {
 	int ret = -EINVAL;
-	if (intel_private.base.do_idle_maps)
-		return -ENODEV;
 	if (intel_private.clear_fake_agp) {
-		int start = intel_private.base.stolen_size / PAGE_SIZE;
-		int end = intel_private.base.gtt_mappable_entries;
+		int start = intel_private.stolen_size / PAGE_SIZE;
+		int end = intel_private.gtt_mappable_entries;
 		intel_gtt_clear_range(start, end - start);
 		intel_private.clear_fake_agp = false;
 	}
@@ -856,7 +862,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 	if (mem->page_count == 0)
 		goto out;
-	if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
+	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
 		goto out_err;
 	if (type != mem->type)
@@ -868,7 +874,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 	if (!mem->is_flushed)
 		global_cache_flush();
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		struct sg_table st;
 		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
@@ -894,7 +900,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
 	unsigned int i;
 	for (i = first_entry; i < (first_entry + num_entries); i++) {
-		intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
+		intel_private.driver->write_entry(intel_private.scratch_page_dma,
 						  i, 0);
 	}
 	readl(intel_private.gtt+i-1);
@@ -907,12 +913,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
 	if (mem->page_count == 0)
 		return 0;
-	if (intel_private.base.do_idle_maps)
-		return -ENODEV;
 	intel_gtt_clear_range(pg_start, mem->page_count);
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
 		mem->sg_list = NULL;
 		mem->num_sg = 0;
@@ -1069,24 +1072,6 @@ static void i965_write_entry(dma_addr_t addr,
 	writel(addr | pte_flags, intel_private.gtt + entry);
 }
-/* Certain Gen5 chipsets require require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static inline int needs_idle_maps(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
-	const unsigned short gpu_devid = intel_private.pcidev->device;
-
-	/* Query intel_iommu to see if we need the workaround. Presumably that
-	 * was loaded first.
-	 */
-	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
-	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
-	     intel_iommu_gfx_mapped)
-		return 1;
-#endif
-	return 0;
-}
 static int i9xx_setup(void)
 {
@@ -1115,9 +1100,6 @@ static int i9xx_setup(void)
 		break;
 	}
-	if (needs_idle_maps())
-		intel_private.base.do_idle_maps = 1;
 	intel_i9xx_setup_flush();
 	return 0;
@@ -1389,9 +1371,10 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);
-struct intel_gtt *intel_gtt_get(void)
+void intel_gtt_get(size_t *gtt_total, size_t *stolen_size)
 {
-	return &intel_private.base;
+	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
+	*stolen_size = intel_private.stolen_size;
 }
 EXPORT_SYMBOL(intel_gtt_get);
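
For context, intel_gtt_get() now hands the sizes back through out-parameters
instead of exposing &intel_private.base. A minimal caller-side sketch (the
dev_priv field names come from the i915_gtt struct introduced later in this
diff; the surrounding probe code is elided):

	size_t gtt_total, stolen_size;

	/* Both values come back in bytes; gtt_total_entries is already
	 * scaled by PAGE_SHIFT inside intel_gtt_get(). */
	intel_gtt_get(&gtt_total, &stolen_size);
	dev_priv->gtt.total = gtt_total;
	dev_priv->gtt.stolen_size = stolen_size;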
...
@@ -1483,9 +1483,11 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 #define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK	0x04
+#define VIDEO_CAPABILITY_BLOCK	0x07
 #define EDID_BASIC_AUDIO	(1 << 6)
 #define EDID_CEA_YCRCB444	(1 << 5)
 #define EDID_CEA_YCRCB422	(1 << 4)
+#define EDID_CEA_VCDB_QS	(1 << 6)
 /**
  * Search EDID for CEA extension block.
@@ -1901,6 +1903,37 @@ bool drm_detect_monitor_audio(struct edid *edid)
 }
 EXPORT_SYMBOL(drm_detect_monitor_audio);
+
+/**
+ * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ *
+ * Check whether the monitor reports the RGB quantization range selection
+ * as supported. The AVI infoframe can then be used to inform the monitor
+ * which quantization range (full or limited) is used.
+ */
+bool drm_rgb_quant_range_selectable(struct edid *edid)
+{
+	u8 *edid_ext;
+	int i, start, end;
+
+	edid_ext = drm_find_cea_extension(edid);
+	if (!edid_ext)
+		return false;
+
+	if (cea_db_offsets(edid_ext, &start, &end))
+		return false;
+
+	for_each_cea_db(edid_ext, i, start, end) {
+		if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
+		    cea_db_payload_len(&edid_ext[i]) == 2) {
+			DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
+			return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
+		}
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
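
A sketch of the intended consumer (illustrative only; the full-vs-limited
decision itself would come from the "Broadcast RGB" property added elsewhere
in this series):

	/* Only advertise an explicit quantization range in the AVI
	 * infoframe when the sink reports the selection as supported;
	 * otherwise the sink assumes the CEA default for the mode. */
	bool quant_range_selectable = drm_rgb_quant_range_selectable(edid);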
 /**
  * drm_add_display_info - pull display info out if present
  * @edid: EDID data
...
@@ -16,6 +16,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  i915_gem_tiling.o \
 	  i915_sysfs.o \
 	  i915_trace_points.o \
+	  i915_ums.o \
 	  intel_display.o \
 	  intel_crt.o \
 	  intel_lvds.o \
...
@@ -258,8 +258,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
 		   count, size);
-	seq_printf(m, "%zu [%zu] gtt total\n",
-		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
+	seq_printf(m, "%zu [%lu] gtt total\n",
+		   dev_priv->gtt.total,
+		   dev_priv->gtt.mappable_end - dev_priv->gtt.start);
 	mutex_unlock(&dev->struct_mutex);
@@ -813,11 +814,11 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
 	error_priv->dev = dev;
-	spin_lock_irqsave(&dev_priv->error_lock, flags);
-	error_priv->error = dev_priv->first_error;
+	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+	error_priv->error = dev_priv->gpu_error.first_error;
 	if (error_priv->error)
 		kref_get(&error_priv->error->ref);
-	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 	return single_open(file, i915_error_state, error_priv);
 }
@@ -956,7 +957,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 	u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	u32 rpstat;
+	u32 rpstat, cagf;
 	u32 rpupei, rpcurup, rpprevup;
 	u32 rpdownei, rpcurdown, rpprevdown;
 	int max_freq;
@@ -975,6 +976,11 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
 	rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
 	rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+	if (IS_HASWELL(dev))
+		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+	else
+		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+	cagf *= GT_FREQUENCY_MULTIPLIER;
 	gen6_gt_force_wake_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
@@ -987,8 +993,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		   gt_perf_status & 0xff);
 	seq_printf(m, "Render p-state limit: %d\n",
 		   rp_state_limits & 0xff);
-	seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
-		   GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
+	seq_printf(m, "CAGF: %dMHz\n", cagf);
 	seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
 		   GEN6_CURICONT_MASK);
 	seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -1674,7 +1679,7 @@ i915_wedged_read(struct file *filp,
 	len = snprintf(buf, sizeof(buf),
 		       "wedged :  %d\n",
-		       atomic_read(&dev_priv->mm.wedged));
+		       atomic_read(&dev_priv->gpu_error.reset_counter));
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1729,7 +1734,7 @@ i915_ring_stop_read(struct file *filp,
 	int len;
 	len = snprintf(buf, sizeof(buf),
-		       "0x%08x\n", dev_priv->stop_rings);
+		       "0x%08x\n", dev_priv->gpu_error.stop_rings);
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1765,7 +1770,7 @@ i915_ring_stop_write(struct file *filp,
 	if (ret)
 		return ret;
-	dev_priv->stop_rings = val;
+	dev_priv->gpu_error.stop_rings = val;
 	mutex_unlock(&dev->struct_mutex);
 	return cnt;
@@ -1779,6 +1784,102 @@ static const struct file_operations i915_ring_stop_fops = {
 	.llseek = default_llseek,
 };
+
+#define DROP_UNBOUND 0x1
+#define DROP_BOUND 0x2
+#define DROP_RETIRE 0x4
+#define DROP_ACTIVE 0x8
+#define DROP_ALL (DROP_UNBOUND | \
+		  DROP_BOUND | \
+		  DROP_RETIRE | \
+		  DROP_ACTIVE)
+
+static ssize_t
+i915_drop_caches_read(struct file *filp,
+		      char __user *ubuf,
+		      size_t max,
+		      loff_t *ppos)
+{
+	char buf[20];
+	int len;
+
+	len = snprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL);
+	if (len > sizeof(buf))
+		len = sizeof(buf);
+
+	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_drop_caches_write(struct file *filp,
+		       const char __user *ubuf,
+		       size_t cnt,
+		       loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj, *next;
+	char buf[20];
+	int val = 0, ret;
+
+	if (cnt > 0) {
+		if (cnt > sizeof(buf) - 1)
+			return -EINVAL;
+
+		if (copy_from_user(buf, ubuf, cnt))
+			return -EFAULT;
+		buf[cnt] = 0;
+
+		val = simple_strtoul(buf, NULL, 0);
+	}
+
+	DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val);
+
+	/* No need to check and wait for gpu resets, only libdrm auto-restarts
+	 * on ioctls on -EAGAIN. */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (val & DROP_ACTIVE) {
+		ret = i915_gpu_idle(dev);
+		if (ret)
+			goto unlock;
+	}
+
+	if (val & (DROP_RETIRE | DROP_ACTIVE))
+		i915_gem_retire_requests(dev);
+
+	if (val & DROP_BOUND) {
+		list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
+			if (obj->pin_count == 0) {
+				ret = i915_gem_object_unbind(obj);
+				if (ret)
+					goto unlock;
+			}
+	}
+
+	if (val & DROP_UNBOUND) {
+		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+			if (obj->pages_pin_count == 0) {
+				ret = i915_gem_object_put_pages(obj);
+				if (ret)
+					goto unlock;
+			}
+	}
+
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret ?: cnt;
+}
+
+static const struct file_operations i915_drop_caches_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = i915_drop_caches_read,
+	.write = i915_drop_caches_write,
+	.llseek = default_llseek,
+};
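
The new debugfs file is driven by writing a mask of the DROP_* bits; a
minimal userspace sketch (assumes debugfs is mounted at the usual
/sys/kernel/debug and the device is card 0):

	#include <fcntl.h>
	#include <unistd.h>

	/* DROP_ALL == DROP_UNBOUND|DROP_BOUND|DROP_RETIRE|DROP_ACTIVE == 0xf */
	int fd = open("/sys/kernel/debug/dri/0/i915_gem_drop_caches", O_WRONLY);
	if (fd >= 0) {
		write(fd, "0xf", 3);
		close(fd);
	}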
 static ssize_t
 i915_max_freq_read(struct file *filp,
 		   char __user *ubuf,
@@ -2175,6 +2276,12 @@ int i915_debugfs_init(struct drm_minor *minor)
 	if (ret)
 		return ret;
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_gem_drop_caches",
+				  &i915_drop_caches_fops);
+	if (ret)
+		return ret;
 	ret = i915_debugfs_create(minor->debugfs_root, minor,
 				  "i915_error_state",
 				  &i915_error_state_fops);
@@ -2206,6 +2313,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
 				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
+				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
...
@@ -992,6 +992,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_PINNED_BATCHES:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_EXEC_NO_RELOC:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -1070,7 +1076,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 	dev_priv->dri1.gfx_hws_cpu_addr =
-		ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
+		ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
 	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
 		i915_dma_cleanup(dev);
 		ring->status_page.gfx_addr = 0;
@@ -1420,9 +1426,9 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 	if (!ap)
 		return;
-	ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
-	ap->ranges[0].size =
-		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+	ap->ranges[0].base = dev_priv->gtt.mappable_base;
+	ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start;
 	primary =
 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -1536,18 +1542,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto put_gmch;
 	}
-	aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-	dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
-	dev_priv->mm.gtt_mapping =
-		io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
-				     aperture_size);
-	if (dev_priv->mm.gtt_mapping == NULL) {
+	aperture_size = dev_priv->gtt.mappable_end;
+	dev_priv->gtt.mappable =
+		io_mapping_create_wc(dev_priv->gtt.mappable_base,
+				     aperture_size);
+	if (dev_priv->gtt.mappable == NULL) {
 		ret = -EIO;
 		goto out_rmmap;
 	}
-	i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
+	i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base,
 			aperture_size);
 	/* The i915 workqueue is primarily used for batched retirement of
@@ -1600,7 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		pci_enable_msi(dev->pdev);
 	spin_lock_init(&dev_priv->irq_lock);
-	spin_lock_init(&dev_priv->error_lock);
+	spin_lock_init(&dev_priv->gpu_error.lock);
 	spin_lock_init(&dev_priv->rps.lock);
 	mutex_init(&dev_priv->dpio_lock);
@@ -1652,15 +1657,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 out_mtrrfree:
 	if (dev_priv->mm.gtt_mtrr >= 0) {
 		mtrr_del(dev_priv->mm.gtt_mtrr,
-			 dev_priv->mm.gtt_base_addr,
+			 dev_priv->gtt.mappable_base,
 			 aperture_size);
 		dev_priv->mm.gtt_mtrr = -1;
 	}
-	io_mapping_free(dev_priv->mm.gtt_mapping);
+	io_mapping_free(dev_priv->gtt.mappable);
 out_rmmap:
 	pci_iounmap(dev->pdev, dev_priv->regs);
 put_gmch:
-	i915_gem_gtt_fini(dev);
+	dev_priv->gtt.gtt_remove(dev);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
@@ -1690,11 +1695,11 @@ int i915_driver_unload(struct drm_device *dev)
 	/* Cancel the retire work handler, which should be idle now. */
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-	io_mapping_free(dev_priv->mm.gtt_mapping);
+	io_mapping_free(dev_priv->gtt.mappable);
 	if (dev_priv->mm.gtt_mtrr >= 0) {
 		mtrr_del(dev_priv->mm.gtt_mtrr,
-			 dev_priv->mm.gtt_base_addr,
-			 dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
+			 dev_priv->gtt.mappable_base,
+			 dev_priv->gtt.mappable_end);
 		dev_priv->mm.gtt_mtrr = -1;
 	}
@@ -1720,8 +1725,8 @@ int i915_driver_unload(struct drm_device *dev)
 	}
 	/* Free error state after interrupts are fully disabled. */
-	del_timer_sync(&dev_priv->hangcheck_timer);
-	cancel_work_sync(&dev_priv->error_work);
+	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+	cancel_work_sync(&dev_priv->gpu_error.work);
 	i915_destroy_error_state(dev);
 	if (dev->pdev->msi_enabled)
...
@@ -276,6 +276,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.is_valleyview = 1,
+	.display_mmio_offset = VLV_DISPLAY_BASE,
 };
 static const struct intel_device_info intel_valleyview_d_info = {
@@ -285,6 +286,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.is_valleyview = 1,
+	.display_mmio_offset = VLV_DISPLAY_BASE,
 };
 static const struct intel_device_info intel_haswell_d_info = {
@@ -468,6 +470,8 @@ static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	intel_set_power_well(dev, true);
+
 	drm_kms_helper_poll_disable(dev);
 	pci_save_state(dev->pdev);
@@ -779,9 +783,9 @@ int intel_gpu_reset(struct drm_device *dev)
 	}
 	/* Also reset the gpu hangman. */
-	if (dev_priv->stop_rings) {
+	if (dev_priv->gpu_error.stop_rings) {
 		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
-		dev_priv->stop_rings = 0;
+		dev_priv->gpu_error.stop_rings = 0;
 		if (ret == -ENODEV) {
 			DRM_ERROR("Reset not implemented, but ignoring "
 				  "error for simulated gpu hangs\n");
@@ -820,12 +824,12 @@ int i915_reset(struct drm_device *dev)
 	i915_gem_reset(dev);
 	ret = -ENODEV;
-	if (get_seconds() - dev_priv->last_gpu_reset < 5)
+	if (get_seconds() - dev_priv->gpu_error.last_reset < 5)
 		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
 	else
 		ret = intel_gpu_reset(dev);
-	dev_priv->last_gpu_reset = get_seconds();
+	dev_priv->gpu_error.last_reset = get_seconds();
 	if (ret) {
 		DRM_ERROR("Failed to reset chip.\n");
 		mutex_unlock(&dev->struct_mutex);
@@ -1115,102 +1119,6 @@ MODULE_LICENSE("GPL and additional rights");
 	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
 	 ((reg) < 0x40000) &&            \
 	 ((reg) != FORCEWAKE))
-
-static bool IS_DISPLAYREG(u32 reg)
-{
-	/*
-	 * This should make it easier to transition modules over to the
-	 * new register block scheme, since we can do it incrementally.
-	 */
-	if (reg >= VLV_DISPLAY_BASE)
-		return false;
-
-	if (reg >= RENDER_RING_BASE &&
-	    reg < RENDER_RING_BASE + 0xff)
-		return false;
-	if (reg >= GEN6_BSD_RING_BASE &&
-	    reg < GEN6_BSD_RING_BASE + 0xff)
-		return false;
-	if (reg >= BLT_RING_BASE &&
-	    reg < BLT_RING_BASE + 0xff)
-		return false;
-
-	if (reg == PGTBL_ER)
-		return false;
-
-	if (reg >= IPEIR_I965 &&
-	    reg < HWSTAM)
-		return false;
-
-	if (reg == MI_MODE)
-		return false;
-
-	if (reg == GFX_MODE_GEN7)
-		return false;
-
-	if (reg == RENDER_HWS_PGA_GEN7 ||
-	    reg == BSD_HWS_PGA_GEN7 ||
-	    reg == BLT_HWS_PGA_GEN7)
-		return false;
-
-	if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
-	    reg == GEN6_BSD_RNCID)
-		return false;
-
-	if (reg == GEN6_BLITTER_ECOSKPD)
-		return false;
-
-	if (reg >= 0x4000c &&
-	    reg <= 0x4002c)
-		return false;
-
-	if (reg >= 0x4f000 &&
-	    reg <= 0x4f08f)
-		return false;
-
-	if (reg >= 0x4f100 &&
-	    reg <= 0x4f11f)
-		return false;
-
-	if (reg >= VLV_MASTER_IER &&
-	    reg <= GEN6_PMIER)
-		return false;
-
-	if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
-	    reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
-		return false;
-
-	if (reg >= VLV_IIR_RW &&
-	    reg <= VLV_ISR)
-		return false;
-
-	if (reg == FORCEWAKE_VLV ||
-	    reg == FORCEWAKE_ACK_VLV)
-		return false;
-
-	if (reg == GEN6_GDRST)
-		return false;
-
-	switch (reg) {
-	case _3D_CHICKEN3:
-	case IVB_CHICKEN3:
-	case GEN7_COMMON_SLICE_CHICKEN1:
-	case GEN7_L3CNTLREG1:
-	case GEN7_L3_CHICKEN_MODE_REGISTER:
-	case GEN7_ROW_CHICKEN2:
-	case GEN7_L3SQCREG4:
-	case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
-	case GEN7_HALF_SLICE_CHICKEN1:
-	case GEN6_MBCTL:
-	case GEN6_UCGCTL2:
-		return false;
-	default:
-		break;
-	}
-
-	return true;
-}
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
 {
@@ -1234,8 +1142,6 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 		if (dev_priv->forcewake_count == 0) \
 			dev_priv->gt.force_wake_put(dev_priv); \
 		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
-	} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
-		val = read##y(dev_priv->regs + reg + 0x180000); \
 	} else { \
 		val = read##y(dev_priv->regs + reg); \
 	} \
@@ -1262,11 +1168,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
 		DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
 		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
 	} \
-	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
-		write##y(val, dev_priv->regs + reg + 0x180000); \
-	} else { \
-		write##y(val, dev_priv->regs + reg); \
-	} \
+	write##y(val, dev_priv->regs + reg); \
 	if (unlikely(__fifo_ret)) { \
 		gen6_gt_check_fifodbg(dev_priv); \
 	} \
...
@@ -337,6 +337,7 @@ struct drm_i915_gt_funcs {
 	DEV_INFO_FLAG(has_llc)
 struct intel_device_info {
+	u32 display_mmio_offset;
 	u8 gen;
 	u8 is_mobile:1;
 	u8 is_i85x:1;
@@ -364,6 +365,49 @@ struct intel_device_info {
 	u8 has_llc:1;
 };
+
+enum i915_cache_level {
+	I915_CACHE_NONE = 0,
+	I915_CACHE_LLC,
+	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+	unsigned long start;		/* Start offset of used GTT */
+	size_t total;			/* Total size GTT can map */
+	size_t stolen_size;		/* Total size of stolen memory */
+
+	unsigned long mappable_end;	/* End offset that we can CPU map */
+	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
+	phys_addr_t mappable_base;	/* PA of our GMADR */
+
+	/** "Graphics Stolen Memory" holds the global PTEs */
+	void __iomem *gsm;
+
+	bool do_idle_maps;
+	dma_addr_t scratch_page_dma;
+	struct page *scratch_page;
+
+	/* global gtt ops */
+	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+			 size_t *stolen);
+	void (*gtt_remove)(struct drm_device *dev);
+	void (*gtt_clear_range)(struct drm_device *dev,
+				unsigned int first_entry,
+				unsigned int num_entries);
+	void (*gtt_insert_entries)(struct drm_device *dev,
+				   struct sg_table *st,
+				   unsigned int pg_start,
+				   enum i915_cache_level cache_level);
+};
+#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
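
To make the GMADR comment concrete, a CPU access through the mappable
region looks roughly like this (mirroring the relocation path at the end of
this diff; error handling elided, offset/value are illustrative):

	void __iomem *vaddr;

	/* gtt.mappable is the io_mapping created over gtt.mappable_base,
	 * so CPU writes land in the coherent GMADR aperture. */
	vaddr = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					 offset & PAGE_MASK);
	writel(value, vaddr + (offset & ~PAGE_MASK));
	io_mapping_unmap_atomic(vaddr);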
 #define I915_PPGTT_PD_ENTRIES 512
 #define I915_PPGTT_PT_ENTRIES 1024
 struct i915_hw_ppgtt {
@@ -373,6 +417,16 @@ struct i915_hw_ppgtt {
 	uint32_t pd_offset;
 	dma_addr_t *pt_dma_addr;
 	dma_addr_t scratch_page_dma_addr;
+
+	/* pte functions, mirroring the interface of the global gtt. */
+	void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
+			    unsigned int first_entry,
+			    unsigned int num_entries);
+	void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
+			       struct sg_table *st,
+			       unsigned int pg_start,
+			       enum i915_cache_level cache_level);
+	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
 };
@@ -642,6 +696,153 @@ struct intel_l3_parity {
 	struct work_struct error_work;
 };
+
+struct i915_gem_mm {
+	/** Memory allocator for GTT stolen memory */
+	struct drm_mm stolen;
+	/** Memory allocator for GTT */
+	struct drm_mm gtt_space;
+	/** List of all objects in gtt_space. Used to restore gtt
+	 *  mappings on resume */
+	struct list_head bound_list;
+	/**
+	 * List of objects which are not bound to the GTT (thus
+	 * are idle and not used by the GPU) but still have
+	 * (presumably uncached) pages still attached.
+	 */
+	struct list_head unbound_list;
+
+	/** Usable portion of the GTT for GEM */
+	unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+	int gtt_mtrr;
+
+	/** PPGTT used for aliasing the PPGTT with the GTT */
+	struct i915_hw_ppgtt *aliasing_ppgtt;
+
+	struct shrinker inactive_shrinker;
+	bool shrinker_no_lock_stealing;
+
+	/**
+	 * List of objects currently involved in rendering.
+	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives. last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
+	 * A reference is held on the buffer while on this list.
+	 */
+	struct list_head active_list;
+
+	/**
+	 * LRU list of objects which are not in the ringbuffer and
+	 * are ready to unbind, but are still in the GTT.
+	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
+	 * A reference is not held on the buffer while on this list,
+	 * as merely being GTT-bound shouldn't prevent its being
+	 * freed, and we'll pull it off the list in the free path.
+	 */
+	struct list_head inactive_list;
+
+	/** LRU list of objects with fence regs on them. */
+	struct list_head fence_list;
+
+	/**
+	 * We leave the user IRQ off as much as possible,
+	 * but this means that requests will finish and never
+	 * be retired once the system goes idle. Set a timer to
+	 * fire periodically while the ring is running. When it
+	 * fires, go retire requests.
+	 */
+	struct delayed_work retire_work;
+
+	/**
+	 * Are we in a non-interruptible section of code like
+	 * modesetting?
+	 */
+	bool interruptible;
+
+	/**
+	 * Flag if the X Server, and thus DRM, is not currently in
+	 * control of the device.
+	 *
+	 * This is set between LeaveVT and EnterVT. It needs to be
+	 * replaced with a semaphore. It also needs to be
+	 * transitioned away from for kernel modesetting.
+	 */
+	int suspended;
+
+	/** Bit 6 swizzling required for X tiling */
+	uint32_t bit_6_swizzle_x;
+	/** Bit 6 swizzling required for Y tiling */
+	uint32_t bit_6_swizzle_y;
+
+	/* storage for physical objects */
+	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+	/* accounting, useful for userland debugging */
+	size_t object_memory;
+	u32 object_count;
+};
+
+struct i915_gpu_error {
+	/* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+	struct timer_list hangcheck_timer;
+	int hangcheck_count;
+	uint32_t last_acthd[I915_NUM_RINGS];
+	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+	/* For reset and error_state handling. */
+	spinlock_t lock;
+	/* Protected by the above dev->gpu_error.lock. */
+	struct drm_i915_error_state *first_error;
+	struct work_struct work;
+
+	unsigned long last_reset;
+
+	/**
+	 * State variable and reset counter controlling the reset flow
+	 *
+	 * Upper bits are for the reset counter. This counter is used by the
+	 * wait_seqno code to race-free noticed that a reset event happened and
+	 * that it needs to restart the entire ioctl (since most likely the
+	 * seqno it waited for won't ever signal anytime soon).
+	 *
+	 * This is important for lock-free wait paths, where no contended lock
+	 * naturally enforces the correct ordering between the bail-out of the
+	 * waiter and the gpu reset work code.
+	 *
+	 * Lowest bit controls the reset state machine: Set means a reset is in
+	 * progress. This state will (presuming we don't have any bugs) decay
+	 * into either unset (successful reset) or the special WEDGED value (hw
+	 * terminally sour). All waiters on the reset_queue will be woken when
+	 * that happens.
+	 */
+	atomic_t reset_counter;
+
+	/**
+	 * Special values/flags for reset_counter
+	 *
+	 * Note that the code relies on
+	 * 	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
+	 * being true.
+	 */
+#define I915_RESET_IN_PROGRESS_FLAG	1
+#define I915_WEDGED			0xffffffff
+
+	/**
+	 * Waitqueue to signal when the reset has completed. Used by clients
+	 * that wait for dev_priv->mm.wedged to settle.
+	 */
+	wait_queue_head_t reset_queue;
+
+	/* For gpu hang simulation. */
+	unsigned int stop_rings;
+};
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
@@ -697,7 +898,6 @@ typedef struct drm_i915_private {
 	u32 pipestat[2];
 	u32 irq_mask;
 	u32 gt_irq_mask;
-	u32 pch_irq_mask;
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
@@ -706,16 +906,6 @@ typedef struct drm_i915_private {
 	int num_pipe;
 	int num_pch_pll;
-	/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-	struct timer_list hangcheck_timer;
-	int hangcheck_count;
-	uint32_t last_acthd[I915_NUM_RINGS];
-	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
-	unsigned int stop_rings;
-
 	unsigned long cfb_size;
 	unsigned int cfb_fb;
 	enum plane cfb_plane;
@@ -763,11 +953,6 @@ typedef struct drm_i915_private {
 	unsigned int fsb_freq, mem_freq, is_ddr3;
-	spinlock_t error_lock;
-	/* Protected by dev->error_lock. */
-	struct drm_i915_error_state *first_error;
-	struct work_struct error_work;
-	struct completion error_completion;
 	struct workqueue_struct *wq;
 	/* Display functions */
@@ -782,116 +967,9 @@ typedef struct drm_i915_private {
 	/* Register state */
 	bool modeset_on_lid;
-	struct {
-		/** Bridge to intel-gtt-ko */
-		struct intel_gtt *gtt;
-		/** Memory allocator for GTT stolen memory */
-		struct drm_mm stolen;
-		/** Memory allocator for GTT */
-		struct drm_mm gtt_space;
-		/** List of all objects in gtt_space. Used to restore gtt
-		 *  mappings on resume */
-		struct list_head bound_list;
-		/**
-		 * List of objects which are not bound to the GTT (thus
-		 * are idle and not used by the GPU) but still have
-		 * (presumably uncached) pages still attached.
-		 */
-		struct list_head unbound_list;
-		/** Usable portion of the GTT for GEM */
-		unsigned long gtt_start;
-		unsigned long gtt_mappable_end;
-		unsigned long gtt_end;
-		unsigned long stolen_base; /* limited to low memory (32-bit) */
-		/** "Graphics Stolen Memory" holds the global PTEs */
-		void __iomem *gsm;
-		struct io_mapping *gtt_mapping;
-		phys_addr_t gtt_base_addr;
-		int gtt_mtrr;
-		/** PPGTT used for aliasing the PPGTT with the GTT */
-		struct i915_hw_ppgtt *aliasing_ppgtt;
-		struct shrinker inactive_shrinker;
-		bool shrinker_no_lock_stealing;
-		/**
-		 * List of objects currently involved in rendering.
-		 *
-		 * Includes buffers having the contents of their GPU caches
-		 * flushed, not necessarily primitives. last_rendering_seqno
-		 * represents when the rendering involved will be completed.
-		 *
-		 * A reference is held on the buffer while on this list.
-		 */
-		struct list_head active_list;
-		/**
-		 * LRU list of objects which are not in the ringbuffer and
-		 * are ready to unbind, but are still in the GTT.
-		 *
-		 * last_rendering_seqno is 0 while an object is in this list.
-		 *
-		 * A reference is not held on the buffer while on this list,
-		 * as merely being GTT-bound shouldn't prevent its being
-		 * freed, and we'll pull it off the list in the free path.
-		 */
-		struct list_head inactive_list;
-		/** LRU list of objects with fence regs on them. */
-		struct list_head fence_list;
-		/**
-		 * We leave the user IRQ off as much as possible,
-		 * but this means that requests will finish and never
-		 * be retired once the system goes idle. Set a timer to
-		 * fire periodically while the ring is running. When it
-		 * fires, go retire requests.
-		 */
-		struct delayed_work retire_work;
-		/**
-		 * Are we in a non-interruptible section of code like
-		 * modesetting?
-		 */
-		bool interruptible;
-		/**
-		 * Flag if the X Server, and thus DRM, is not currently in
-		 * control of the device.
-		 *
-		 * This is set between LeaveVT and EnterVT. It needs to be
-		 * replaced with a semaphore. It also needs to be
-		 * transitioned away from for kernel modesetting.
-		 */
-		int suspended;
-		/**
-		 * Flag if the hardware appears to be wedged.
-		 *
-		 * This is set when attempts to idle the device timeout.
-		 * It prevents command submission from occurring and makes
-		 * every pending request fail
-		 */
-		atomic_t wedged;
-		/** Bit 6 swizzling required for X tiling */
-		uint32_t bit_6_swizzle_x;
-		/** Bit 6 swizzling required for Y tiling */
-		uint32_t bit_6_swizzle_y;
-		/* storage for physical objects */
-		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-		/* accounting, useful for userland debugging */
-		size_t gtt_total;
-		size_t mappable_gtt_total;
-		size_t object_memory;
-		u32 object_count;
-	} mm;
+	struct i915_gtt gtt;
+
+	struct i915_gem_mm mm;
 	/* Kernel Modesetting */
@@ -933,7 +1011,7 @@ typedef struct drm_i915_private {
 	struct drm_mm_node *compressed_fb;
 	struct drm_mm_node *compressed_llb;
-	unsigned long last_gpu_reset;
+	struct i915_gpu_error gpu_error;
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
@@ -973,12 +1051,6 @@ enum hdmi_force_audio {
 	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
 };
-enum i915_cache_level {
-	I915_CACHE_NONE = 0,
-	I915_CACHE_LLC,
-	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
-};
 #define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
 struct drm_i915_gem_object_ops {
@@ -1446,6 +1518,7 @@ int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     bool nonblocking);
 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
@@ -1524,8 +1597,18 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
-int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
+
+static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
+{
+	return unlikely(atomic_read(&error->reset_counter)
+			& I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+{
+	return atomic_read(&error->reset_counter) == I915_WEDGED;
+}
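
The reset_counter documentation above boils down to a waiter pattern
roughly like this (a sketch, not the exact wait_seqno code):

	/* Sample the counter before blocking; every reset bumps it. */
	unsigned reset_counter = atomic_read(&error->reset_counter);

	if (i915_terminally_wedged(error))
		return -EIO;
	if (i915_reset_in_progress(error))
		return -EAGAIN; /* libdrm auto-restarts the ioctl */

	/* ... sleep on the seqno, then recheck ... */
	if (reset_counter != atomic_read(&error->reset_counter))
		return -EAGAIN;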
 void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@@ -1566,9 +1649,10 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
-uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
-				    uint32_t size,
-				    int tiling_mode);
+uint32_t
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+			   int tiling_mode, bool fenced);
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
@@ -1591,7 +1675,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file);
 /* i915_gem_gtt.c */
-int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
 void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 			    struct drm_i915_gem_object *obj,
@@ -1609,7 +1692,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
 			       unsigned long mappable_end, unsigned long end);
 int i915_gem_gtt_init(struct drm_device *dev);
-void i915_gem_gtt_fini(struct drm_device *dev);
 static inline void i915_gem_chipset_flush(struct drm_device *dev)
 {
 	if (INTEL_INFO(dev)->gen < 6)
@@ -1668,9 +1750,9 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
-/* i915_suspend.c */
-extern int i915_save_state(struct drm_device *dev);
-extern int i915_restore_state(struct drm_device *dev);
+/* i915_ums.c */
+void i915_save_display_reg(struct drm_device *dev);
+void i915_restore_display_reg(struct drm_device *dev);
 /* i915_sysfs.c */
 void i915_setup_sysfs(struct drm_device *dev_priv);
@@ -1727,6 +1809,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void intel_modeset_setup_hw_state(struct drm_device *dev,
 					 bool force_restore);
+extern void i915_redisable_vga(struct drm_device *dev);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -1799,5 +1882,19 @@ __i915_write(64, q)
 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
+
+/* "Broadcast RGB" property */
+#define INTEL_BROADCAST_RGB_AUTO 0
+#define INTEL_BROADCAST_RGB_FULL 1
+#define INTEL_BROADCAST_RGB_LIMITED 2
+
+static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
+{
+	if (HAS_PCH_SPLIT(dev))
+		return CPU_VGACNTRL;
+	else if (IS_VALLEYVIEW(dev))
+		return VLV_VGACNTRL;
+	else
+		return VGACNTRL;
+}
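
A sketch of the kind of caller this helper serves (roughly what
i915_redisable_vga(), declared above, does; VGA_DISP_DISABLE is the
existing disable bit):

	u32 vga_reg = i915_vgacntrl_reg(dev);

	/* Disable the legacy VGA plane wherever the control register
	 * lives on this platform (PCH split, VLV, or gen2-4). */
	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
		I915_WRITE(vga_reg, VGA_DISP_DISABLE);
		POSTING_READ(vga_reg);
	}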
 #endif
@@ -80,7 +80,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 	if (mappable)
 		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
 					    min_size, alignment, cache_level,
-					    0, dev_priv->mm.gtt_mappable_end);
+					    0, dev_priv->gtt.mappable_end);
 	else
 		drm_mm_init_scan(&dev_priv->mm.gtt_space,
 				 min_size, alignment, cache_level);
...
...@@ -34,61 +34,133 @@ ...@@ -34,61 +34,133 @@
#include <linux/dma_remapping.h> #include <linux/dma_remapping.h>
struct eb_objects { struct eb_objects {
struct list_head objects;
int and; int and;
struct hlist_head buckets[0]; union {
struct drm_i915_gem_object *lut[0];
struct hlist_head buckets[0];
};
}; };
static struct eb_objects * static struct eb_objects *
eb_create(int size) eb_create(struct drm_i915_gem_execbuffer2 *args)
{ {
struct eb_objects *eb; struct eb_objects *eb = NULL;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); if (args->flags & I915_EXEC_HANDLE_LUT) {
while (count > size) int size = args->buffer_count;
count >>= 1; size *= sizeof(struct drm_i915_gem_object *);
eb = kzalloc(count*sizeof(struct hlist_head) + size += sizeof(struct eb_objects);
sizeof(struct eb_objects), eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
GFP_KERNEL); }
if (eb == NULL)
return eb; if (eb == NULL) {
int size = args->buffer_count;
eb->and = count - 1; int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
while (count > 2*size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
sizeof(struct eb_objects),
GFP_TEMPORARY);
if (eb == NULL)
return eb;
eb->and = count - 1;
} else
eb->and = -args->buffer_count;
INIT_LIST_HEAD(&eb->objects);
return eb; return eb;
} }
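Reading the new eb_create(): the union gives eb_objects two interchangeable shapes, and a negative eb->and (set to -args->buffer_count) marks the flat-LUT shape. The hash fallback's bucket arithmetic is easy to miss, so here is a worked example under the common 4 KiB page / 8-byte hlist_head assumptions (illustrative numbers, not from the patch):

/* count starts at PAGE_SIZE / sizeof(struct hlist_head) / 2 = 256
 * and is halved while it exceeds twice the buffer count, e.g. for
 * a 20-object execbuffer:
 *     256 -> 128 -> 64 -> 32      (32 <= 2 * 20, stop)
 * leaving 32 buckets and eb->and = 31 as the hash mask.
 */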
static void static void
eb_reset(struct eb_objects *eb) eb_reset(struct eb_objects *eb)
{ {
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); if (eb->and >= 0)
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
} }
static void static int
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj) eb_lookup_objects(struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
const struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file)
{ {
hlist_add_head(&obj->exec_node, int i;
&eb->buckets[obj->exec_handle & eb->and]);
spin_lock(&file->table_lock);
for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj;
obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
if (obj == NULL) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
return -ENOENT;
}
if (!list_empty(&obj->exec_list)) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
return -EINVAL;
}
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->exec_list, &eb->objects);
obj->exec_entry = &exec[i];
if (eb->and < 0) {
eb->lut[i] = obj;
} else {
uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
obj->exec_handle = handle;
hlist_add_head(&obj->exec_node,
&eb->buckets[handle & eb->and]);
}
}
spin_unlock(&file->table_lock);
return 0;
} }
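One subtlety in the lookup above (my reading of the code, not patch text): even when the flat-LUT kmalloc fails and eb_create() falls back to the hash table, userspace that passed I915_EXEC_HANDLE_LUT still refers to objects by execbuffer index in its relocations, so the hash is keyed by the index i instead of exec[i].handle:

/* Resulting lookup contract:
 *   eb->and <  0 : reloc target handles are indices, obj = eb->lut[i]
 *   eb->and >= 0 : key = (HANDLE_LUT ? index : GEM handle), found via
 *                  eb->buckets[key & eb->and]
 */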
static struct drm_i915_gem_object * static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle) eb_get_object(struct eb_objects *eb, unsigned long handle)
{ {
struct hlist_head *head; if (eb->and < 0) {
struct hlist_node *node; if (handle >= -eb->and)
struct drm_i915_gem_object *obj; return NULL;
return eb->lut[handle];
} else {
struct hlist_head *head;
struct hlist_node *node;
head = &eb->buckets[handle & eb->and]; head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) { hlist_for_each(node, head) {
obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); struct drm_i915_gem_object *obj;
if (obj->exec_handle == handle)
return obj;
}
return NULL; obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
if (obj->exec_handle == handle)
return obj;
}
return NULL;
}
} }
static void static void
eb_destroy(struct eb_objects *eb) eb_destroy(struct eb_objects *eb)
{ {
while (!list_empty(&eb->objects)) {
struct drm_i915_gem_object *obj;
obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
kfree(eb); kfree(eb);
} }
...@@ -209,7 +281,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, ...@@ -209,7 +281,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Map the page containing the relocation we're going to perform. */ /* Map the page containing the relocation we're going to perform. */
reloc->offset += obj->gtt_offset; reloc->offset += obj->gtt_offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc->offset & PAGE_MASK); reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *) reloc_entry = (uint32_t __iomem *)
(reloc_page + (reloc->offset & ~PAGE_MASK)); (reloc_page + (reloc->offset & ~PAGE_MASK));
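For orientation, a condensed sketch of the full GTT relocation write this hunk belongs to, following the calls visible above (hypothetical helper name; error handling elided):

static void example_apply_gtt_reloc(struct drm_i915_private *dev_priv,
				    struct drm_i915_gem_object *obj,
				    struct drm_i915_gem_relocation_entry *reloc,
				    uint32_t target_offset)
{
	void __iomem *reloc_page;
	uint32_t __iomem *reloc_entry;
	uint64_t offset = obj->gtt_offset + reloc->offset;

	/* Map the single aperture page holding the dword, patch it
	 * with the target's final address, then unmap. */
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      offset & PAGE_MASK);
	reloc_entry = (uint32_t __iomem *)(reloc_page + (offset & ~PAGE_MASK));
	iowrite32(reloc->delta + target_offset, reloc_entry);
	io_mapping_unmap_atomic(reloc_page);
}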
...@@ -288,8 +360,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, ...@@ -288,8 +360,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
static int static int
i915_gem_execbuffer_relocate(struct drm_device *dev, i915_gem_execbuffer_relocate(struct drm_device *dev,
struct eb_objects *eb, struct eb_objects *eb)
struct list_head *objects)
{ {
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
int ret = 0; int ret = 0;
...@@ -302,7 +373,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, ...@@ -302,7 +373,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
* lockdep complains vehemently. * lockdep complains vehemently.
*/ */
pagefault_disable(); pagefault_disable();
list_for_each_entry(obj, objects, exec_list) { list_for_each_entry(obj, &eb->objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb); ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret) if (ret)
break; break;
...@@ -324,7 +395,8 @@ need_reloc_mappable(struct drm_i915_gem_object *obj) ...@@ -324,7 +395,8 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
static int static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring) struct intel_ring_buffer *ring,
bool *need_reloc)
{ {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
...@@ -365,7 +437,20 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, ...@@ -365,7 +437,20 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1; obj->has_aliasing_ppgtt_mapping = 1;
} }
entry->offset = obj->gtt_offset; if (entry->offset != obj->gtt_offset) {
entry->offset = obj->gtt_offset;
*need_reloc = true;
}
if (entry->flags & EXEC_OBJECT_WRITE) {
obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
}
if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
!obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(obj, obj->cache_level);
return 0; return 0;
} }
...@@ -391,7 +476,8 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) ...@@ -391,7 +476,8 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file, struct drm_file *file,
struct list_head *objects) struct list_head *objects,
bool *need_relocs)
{ {
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct list_head ordered_objects; struct list_head ordered_objects;
...@@ -419,7 +505,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, ...@@ -419,7 +505,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
else else
list_move_tail(&obj->exec_list, &ordered_objects); list_move_tail(&obj->exec_list, &ordered_objects);
obj->base.pending_read_domains = 0; obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0; obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false; obj->pending_fenced_gpu_access = false;
} }
...@@ -459,7 +545,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, ...@@ -459,7 +545,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
(need_mappable && !obj->map_and_fenceable)) (need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj); ret = i915_gem_object_unbind(obj);
else else
ret = i915_gem_execbuffer_reserve_object(obj, ring); ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
if (ret) if (ret)
goto err; goto err;
} }
...@@ -469,7 +555,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, ...@@ -469,7 +555,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (obj->gtt_space) if (obj->gtt_space)
continue; continue;
ret = i915_gem_execbuffer_reserve_object(obj, ring); ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
if (ret) if (ret)
goto err; goto err;
} }
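Taken together, the two loops these hunks touch implement a two-pass reservation, now with the need_relocs plumbing threaded through (a paraphrase of the flow, not patch text):

/* pass 1: walk objects in submission order; unbind anything whose
 *         current placement breaks its alignment or mappability
 *         requirements, otherwise reserve it where it sits (setting
 *         *need_relocs if a presumed offset turned out stale);
 * pass 2: bind whatever is still unbound, evicting as necessary.
 */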
...@@ -489,21 +575,22 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, ...@@ -489,21 +575,22 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
static int static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev, i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file, struct drm_file *file,
struct intel_ring_buffer *ring, struct intel_ring_buffer *ring,
struct list_head *objects,
struct eb_objects *eb, struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec, struct drm_i915_gem_exec_object2 *exec)
int count)
{ {
struct drm_i915_gem_relocation_entry *reloc; struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
bool need_relocs;
int *reloc_offset; int *reloc_offset;
int i, total, ret; int i, total, ret;
int count = args->buffer_count;
/* We may process another execbuffer during the unlock... */ /* We may process another execbuffer during the unlock... */
while (!list_empty(objects)) { while (!list_empty(&eb->objects)) {
obj = list_first_entry(objects, obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object, struct drm_i915_gem_object,
exec_list); exec_list);
list_del_init(&obj->exec_list); list_del_init(&obj->exec_list);
...@@ -550,27 +637,16 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, ...@@ -550,27 +637,16 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */ /* reacquire the objects */
eb_reset(eb); eb_reset(eb);
for (i = 0; i < count; i++) { ret = eb_lookup_objects(eb, exec, args, file);
obj = to_intel_bo(drm_gem_object_lookup(dev, file, if (ret)
exec[i].handle)); goto err;
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
goto err;
}
list_add_tail(&obj->exec_list, objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
ret = i915_gem_execbuffer_reserve(ring, file, objects); need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
if (ret) if (ret)
goto err; goto err;
list_for_each_entry(obj, objects, exec_list) { list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec; int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]); reloc + reloc_offset[offset]);
...@@ -624,6 +700,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, ...@@ -624,6 +700,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
static bool static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{ {
if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
return false;
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
} }
...@@ -637,6 +716,9 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, ...@@ -637,6 +716,9 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
int length; /* limited by fault_in_pages_readable() */ int length; /* limited by fault_in_pages_readable() */
if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
return -EINVAL;
/* First check for malicious input causing overflow */ /* First check for malicious input causing overflow */
if (exec[i].relocation_count > if (exec[i].relocation_count >
INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
...@@ -644,9 +726,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, ...@@ -644,9 +726,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
length = exec[i].relocation_count * length = exec[i].relocation_count *
sizeof(struct drm_i915_gem_relocation_entry); sizeof(struct drm_i915_gem_relocation_entry);
if (!access_ok(VERIFY_READ, ptr, length))
return -EFAULT;
/* we may also need to update the presumed offsets */ /* we may also need to update the presumed offsets */
if (!access_ok(VERIFY_WRITE, ptr, length)) if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT; return -EFAULT;
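The deleted VERIFY_READ check is redundant rather than a relaxation: access_ok(VERIFY_WRITE, ...) is at least as strict as access_ok(VERIFY_READ, ...), since any user range the kernel may write it may also read. In comment form:

/* access_ok(VERIFY_WRITE, ptr, length) already implies readability,
 * so the separate VERIFY_READ check bought nothing; the write check
 * stays because the kernel may update the presumed offsets in place.
 */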
...@@ -668,8 +747,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, ...@@ -668,8 +747,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
u32 old_read = obj->base.read_domains; u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain; u32 old_write = obj->base.write_domain;
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain; obj->base.write_domain = obj->base.pending_write_domain;
if (obj->base.write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access; obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
i915_gem_object_move_to_active(obj, ring); i915_gem_object_move_to_active(obj, ring);
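The reordering above changes what lands in read_domains for read-only objects (my gloss on the new code):

/* If this batch does not write the object (write_domain == 0), fold
 * its existing read domains into the pending set before latching, so
 * a read-only submission widens rather than replaces the domains the
 * object is already tracked under.
 */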
...@@ -728,21 +809,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -728,21 +809,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec) struct drm_i915_gem_exec_object2 *exec)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head objects;
struct eb_objects *eb; struct eb_objects *eb;
struct drm_i915_gem_object *batch_obj; struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL; struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring; struct intel_ring_buffer *ring;
u32 ctx_id = i915_execbuffer2_get_context_id(*args); u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len; u32 exec_start, exec_len;
u32 mask; u32 mask, flags;
u32 flags;
int ret, mode, i; int ret, mode, i;
bool need_relocs;
if (!i915_gem_check_execbuffer(args)) { if (!i915_gem_check_execbuffer(args))
DRM_DEBUG("execbuf with invalid offset/length\n");
return -EINVAL; return -EINVAL;
}
ret = validate_exec_list(exec, args->buffer_count); ret = validate_exec_list(exec, args->buffer_count);
if (ret) if (ret)
...@@ -863,7 +941,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -863,7 +941,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err; goto pre_mutex_err;
} }
eb = eb_create(args->buffer_count); eb = eb_create(args);
if (eb == NULL) { if (eb == NULL) {
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM; ret = -ENOMEM;
...@@ -871,51 +949,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -871,51 +949,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
} }
/* Look up object handles */ /* Look up object handles */
INIT_LIST_HEAD(&objects); ret = eb_lookup_objects(eb, exec, args, file);
for (i = 0; i < args->buffer_count; i++) { if (ret)
struct drm_i915_gem_object *obj; goto err;
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
ret = -ENOENT;
goto err;
}
if (!list_empty(&obj->exec_list)) {
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto err;
}
list_add_tail(&obj->exec_list, &objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
/* take note of the batch buffer before we might reorder the lists */ /* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(objects.prev, batch_obj = list_entry(eb->objects.prev,
struct drm_i915_gem_object, struct drm_i915_gem_object,
exec_list); exec_list);
/* Move the objects en-masse into the GTT, evicting if necessary. */ /* Move the objects en-masse into the GTT, evicting if necessary. */
ret = i915_gem_execbuffer_reserve(ring, file, &objects); need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
if (ret) if (ret)
goto err; goto err;
/* The objects are in their final locations, apply the relocations. */ /* The objects are in their final locations, apply the relocations. */
ret = i915_gem_execbuffer_relocate(dev, eb, &objects); if (need_relocs)
ret = i915_gem_execbuffer_relocate(dev, eb);
if (ret) { if (ret) {
if (ret == -EFAULT) { if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
&objects, eb, eb, exec);
exec,
args->buffer_count);
BUG_ON(!mutex_is_locked(&dev->struct_mutex)); BUG_ON(!mutex_is_locked(&dev->struct_mutex));
} }
if (ret) if (ret)
...@@ -937,7 +992,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -937,7 +992,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
if (ret) if (ret)
goto err; goto err;
...@@ -991,20 +1046,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -991,20 +1046,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(&objects, ring); i915_gem_execbuffer_move_to_active(&eb->objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring); i915_gem_execbuffer_retire_commands(dev, file, ring);
err: err:
eb_destroy(eb); eb_destroy(eb);
while (!list_empty(&objects)) {
struct drm_i915_gem_object *obj;
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
...@@ -1113,7 +1159,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, ...@@ -1113,7 +1159,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
} }
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL) if (exec2_list == NULL)
exec2_list = drm_malloc_ab(sizeof(*exec2_list), exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count); args->buffer_count);
......
...@@ -187,11 +187,11 @@ int i915_gem_init_stolen(struct drm_device *dev) ...@@ -187,11 +187,11 @@ int i915_gem_init_stolen(struct drm_device *dev)
if (dev_priv->mm.stolen_base == 0) if (dev_priv->mm.stolen_base == 0)
return 0; return 0;
DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n", DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base); dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
/* Basic memrange allocator for stolen space */ /* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->mm.gtt->stolen_size); drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);
return 0; return 0;
} }
...@@ -205,7 +205,7 @@ i915_pages_create_for_stolen(struct drm_device *dev, ...@@ -205,7 +205,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
struct scatterlist *sg; struct scatterlist *sg;
DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size); DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
BUG_ON(offset > dev_priv->mm.gtt->stolen_size - size); BUG_ON(offset > dev_priv->gtt.stolen_size - size);
/* We hide that we have no struct page backing our stolen object /* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake * by wrapping the contiguous physical allocation with a fake
......
...@@ -272,18 +272,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) ...@@ -272,18 +272,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return false; return false;
} }
/* size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
if (INTEL_INFO(obj->base.dev)->gen == 3)
size = 1024*1024;
else
size = 512*1024;
while (size < obj->base.size)
size <<= 1;
if (obj->gtt_space->size != size) if (obj->gtt_space->size != size)
return false; return false;
...@@ -368,15 +357,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -368,15 +357,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj->map_and_fenceable = obj->map_and_fenceable =
obj->gtt_space == NULL || obj->gtt_space == NULL ||
(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode)); i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */ /* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) { if (!obj->map_and_fenceable) {
u32 unfenced_alignment = u32 unfenced_alignment =
i915_gem_get_unfenced_gtt_alignment(dev, i915_gem_get_gtt_alignment(dev, obj->base.size,
obj->base.size, args->tiling_mode,
args->tiling_mode); false);
if (obj->gtt_offset & (unfenced_alignment - 1)) if (obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj); ret = i915_gem_object_unbind(obj);
} }
......
...@@ -356,8 +356,8 @@ static void notify_ring(struct drm_device *dev, ...@@ -356,8 +356,8 @@ static void notify_ring(struct drm_device *dev,
wake_up_all(&ring->irq_queue); wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) { if (i915_enable_hangcheck) {
dev_priv->hangcheck_count = 0; dev_priv->gpu_error.hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer, mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
} }
} }
...@@ -862,23 +862,60 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) ...@@ -862,23 +862,60 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
*/ */
static void i915_error_work_func(struct work_struct *work) static void i915_error_work_func(struct work_struct *work)
{ {
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
error_work); work);
drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
gpu_error);
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
struct intel_ring_buffer *ring;
char *error_event[] = { "ERROR=1", NULL }; char *error_event[] = { "ERROR=1", NULL };
char *reset_event[] = { "RESET=1", NULL }; char *reset_event[] = { "RESET=1", NULL };
char *reset_done_event[] = { "ERROR=0", NULL }; char *reset_done_event[] = { "ERROR=0", NULL };
int i, ret;
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
if (atomic_read(&dev_priv->mm.wedged)) { /*
* Note that there's only one work item which does gpu resets, so we
* need not worry about concurrent gpu resets potentially incrementing
* error->reset_counter twice. We only need to take care of another
* racing irq/hangcheck declaring the gpu dead for a second time. A
* quick check for that is good enough: schedule_work ensures the
* correct ordering between hang detection and this work item, and since
* the reset in-progress bit is only ever set by code outside of this
* work we don't need to worry about any other races.
*/
if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
DRM_DEBUG_DRIVER("resetting chip\n"); DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
if (!i915_reset(dev)) { reset_event);
atomic_set(&dev_priv->mm.wedged, 0);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); ret = i915_reset(dev);
if (ret == 0) {
/*
* After all the gem state is reset, increment the reset
* counter and wake up everyone waiting for the reset to
* complete.
*
* Since unlock operations are a one-sided barrier only,
* we need to insert a barrier here to order any seqno
* updates before
* the counter increment.
*/
smp_mb__before_atomic_inc();
atomic_inc(&dev_priv->gpu_error.reset_counter);
kobject_uevent_env(&dev->primary->kdev.kobj,
KOBJ_CHANGE, reset_done_event);
} else {
atomic_set(&error->reset_counter, I915_WEDGED);
} }
complete_all(&dev_priv->error_completion);
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
wake_up_all(&dev_priv->gpu_error.reset_queue);
} }
} }
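For context, the reset_counter this work item increments packs three states into one atomic_t; the helpers it pairs with look roughly like the following (reconstructed from this era of the driver, so treat the exact names and values as assumptions rather than patch contents):

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			0xffffffff

static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	/* Odd counter: a hang was declared, the reset work is pending. */
	return unlikely(atomic_read(&error->reset_counter)
			& I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	/* All ones: a reset was attempted and failed; GPU stays dead. */
	return atomic_read(&error->reset_counter) == I915_WEDGED;
}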
...@@ -939,7 +976,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv, ...@@ -939,7 +976,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind; goto unwind;
local_irq_save(flags); local_irq_save(flags);
if (reloc_offset < dev_priv->mm.gtt_mappable_end && if (reloc_offset < dev_priv->gtt.mappable_end &&
src->has_global_gtt_mapping) { src->has_global_gtt_mapping) {
void __iomem *s; void __iomem *s;
...@@ -948,7 +985,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv, ...@@ -948,7 +985,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
* captures what the GPU read. * captures what the GPU read.
*/ */
s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc_offset); reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE); memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s); io_mapping_unmap_atomic(s);
...@@ -1255,9 +1292,9 @@ static void i915_capture_error_state(struct drm_device *dev) ...@@ -1255,9 +1292,9 @@ static void i915_capture_error_state(struct drm_device *dev)
unsigned long flags; unsigned long flags;
int i, pipe; int i, pipe;
spin_lock_irqsave(&dev_priv->error_lock, flags); spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
error = dev_priv->first_error; error = dev_priv->gpu_error.first_error;
spin_unlock_irqrestore(&dev_priv->error_lock, flags); spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error) if (error)
return; return;
...@@ -1268,7 +1305,8 @@ static void i915_capture_error_state(struct drm_device *dev) ...@@ -1268,7 +1305,8 @@ static void i915_capture_error_state(struct drm_device *dev)
return; return;
} }
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", DRM_INFO("capturing error event; look for more information in"
"/sys/kernel/debug/dri/%d/i915_error_state\n",
dev->primary->index); dev->primary->index);
kref_init(&error->ref); kref_init(&error->ref);
...@@ -1341,12 +1379,12 @@ static void i915_capture_error_state(struct drm_device *dev) ...@@ -1341,12 +1379,12 @@ static void i915_capture_error_state(struct drm_device *dev)
error->overlay = intel_overlay_capture_error_state(dev); error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev); error->display = intel_display_capture_error_state(dev);
spin_lock_irqsave(&dev_priv->error_lock, flags); spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
if (dev_priv->first_error == NULL) { if (dev_priv->gpu_error.first_error == NULL) {
dev_priv->first_error = error; dev_priv->gpu_error.first_error = error;
error = NULL; error = NULL;
} }
spin_unlock_irqrestore(&dev_priv->error_lock, flags); spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error) if (error)
i915_error_state_free(&error->ref); i915_error_state_free(&error->ref);
...@@ -1358,10 +1396,10 @@ void i915_destroy_error_state(struct drm_device *dev) ...@@ -1358,10 +1396,10 @@ void i915_destroy_error_state(struct drm_device *dev)
struct drm_i915_error_state *error; struct drm_i915_error_state *error;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&dev_priv->error_lock, flags); spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
error = dev_priv->first_error; error = dev_priv->gpu_error.first_error;
dev_priv->first_error = NULL; dev_priv->gpu_error.first_error = NULL;
spin_unlock_irqrestore(&dev_priv->error_lock, flags); spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error) if (error)
kref_put(&error->ref, i915_error_state_free); kref_put(&error->ref, i915_error_state_free);
...@@ -1482,17 +1520,18 @@ void i915_handle_error(struct drm_device *dev, bool wedged) ...@@ -1482,17 +1520,18 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
i915_report_and_clear_eir(dev); i915_report_and_clear_eir(dev);
if (wedged) { if (wedged) {
INIT_COMPLETION(dev_priv->error_completion); atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
atomic_set(&dev_priv->mm.wedged, 1); &dev_priv->gpu_error.reset_counter);
/* /*
* Wakeup waiting processes so they don't hang * Wakeup waiting processes so that the reset work item
* doesn't deadlock trying to grab various locks.
*/ */
for_each_ring(ring, dev_priv, i) for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue); wake_up_all(&ring->irq_queue);
} }
queue_work(dev_priv->wq, &dev_priv->error_work); queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
} }
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
...@@ -1723,7 +1762,7 @@ static bool i915_hangcheck_hung(struct drm_device *dev) ...@@ -1723,7 +1762,7 @@ static bool i915_hangcheck_hung(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
if (dev_priv->hangcheck_count++ > 1) { if (dev_priv->gpu_error.hangcheck_count++ > 1) {
bool hung = true; bool hung = true;
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
...@@ -1782,25 +1821,29 @@ void i915_hangcheck_elapsed(unsigned long data) ...@@ -1782,25 +1821,29 @@ void i915_hangcheck_elapsed(unsigned long data)
goto repeat; goto repeat;
} }
dev_priv->hangcheck_count = 0; dev_priv->gpu_error.hangcheck_count = 0;
return; return;
} }
i915_get_extra_instdone(dev, instdone); i915_get_extra_instdone(dev, instdone);
if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { sizeof(acthd)) == 0 &&
memcmp(dev_priv->gpu_error.prev_instdone, instdone,
sizeof(instdone)) == 0) {
if (i915_hangcheck_hung(dev)) if (i915_hangcheck_hung(dev))
return; return;
} else { } else {
dev_priv->hangcheck_count = 0; dev_priv->gpu_error.hangcheck_count = 0;
memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); memcpy(dev_priv->gpu_error.last_acthd, acthd,
memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); sizeof(acthd));
memcpy(dev_priv->gpu_error.prev_instdone, instdone,
sizeof(instdone));
} }
repeat: repeat:
/* Reset timer in case chip hangs without another request being added */ /* Reset timer in case chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer, mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
} }
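Summarizing the detector this function implements (paraphrase, not patch text):

/* Every DRM_I915_HANGCHECK_JIFFIES the timer samples ACTHD plus the
 * extra INSTDONE registers. If both snapshots match the previous
 * period's, hangcheck_count is bumped, and i915_hangcheck_hung() only
 * declares a hang once the count exceeds 1 -- i.e. the GPU must look
 * frozen for about three consecutive periods. Any progress, or a new
 * request completing, resets the count to zero.
 */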
...@@ -1892,6 +1935,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) ...@@ -1892,6 +1935,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
DE_AUX_CHANNEL_A; DE_AUX_CHANNEL_A;
u32 render_irqs; u32 render_irqs;
u32 hotplug_mask; u32 hotplug_mask;
u32 pch_irq_mask;
dev_priv->irq_mask = ~display_mask; dev_priv->irq_mask = ~display_mask;
...@@ -1935,10 +1979,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev) ...@@ -1935,10 +1979,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
SDE_AUX_MASK); SDE_AUX_MASK);
} }
dev_priv->pch_irq_mask = ~hotplug_mask; pch_irq_mask = ~hotplug_mask;
I915_WRITE(SDEIIR, I915_READ(SDEIIR)); I915_WRITE(SDEIIR, I915_READ(SDEIIR));
I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); I915_WRITE(SDEIMR, pch_irq_mask);
I915_WRITE(SDEIER, hotplug_mask); I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER); POSTING_READ(SDEIER);
...@@ -1966,6 +2010,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) ...@@ -1966,6 +2010,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
DE_AUX_CHANNEL_A_IVB; DE_AUX_CHANNEL_A_IVB;
u32 render_irqs; u32 render_irqs;
u32 hotplug_mask; u32 hotplug_mask;
u32 pch_irq_mask;
dev_priv->irq_mask = ~display_mask; dev_priv->irq_mask = ~display_mask;
...@@ -1995,10 +2040,10 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) ...@@ -1995,10 +2040,10 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
SDE_PORTD_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT |
SDE_GMBUS_CPT | SDE_GMBUS_CPT |
SDE_AUX_MASK_CPT); SDE_AUX_MASK_CPT);
dev_priv->pch_irq_mask = ~hotplug_mask; pch_irq_mask = ~hotplug_mask;
I915_WRITE(SDEIIR, I915_READ(SDEIIR)); I915_WRITE(SDEIIR, I915_READ(SDEIIR));
I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); I915_WRITE(SDEIMR, pch_irq_mask);
I915_WRITE(SDEIER, hotplug_mask); I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER); POSTING_READ(SDEIER);
...@@ -2767,11 +2812,12 @@ void intel_irq_init(struct drm_device *dev) ...@@ -2767,11 +2812,12 @@ void intel_irq_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func); INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, setup_timer(&dev_priv->gpu_error.hangcheck_timer,
i915_hangcheck_elapsed,
(unsigned long) dev); (unsigned long) dev);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
......
...@@ -267,27 +267,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) ...@@ -267,27 +267,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
crt->force_hotplug_required = 0; crt->force_hotplug_required = 0;
save_adpa = adpa = I915_READ(PCH_ADPA); save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
if (turn_off_dac) if (turn_off_dac)
adpa &= ~ADPA_DAC_ENABLE; adpa &= ~ADPA_DAC_ENABLE;
I915_WRITE(PCH_ADPA, adpa); I915_WRITE(crt->adpa_reg, adpa);
if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000)) 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) { if (turn_off_dac) {
I915_WRITE(PCH_ADPA, save_adpa); I915_WRITE(crt->adpa_reg, save_adpa);
POSTING_READ(PCH_ADPA); POSTING_READ(crt->adpa_reg);
} }
} }
/* Check the status to see if both blue and green are on now */ /* Check the status to see if both blue and green are on now */
adpa = I915_READ(PCH_ADPA); adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true; ret = true;
else else
...@@ -300,26 +300,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) ...@@ -300,26 +300,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{ {
struct drm_device *dev = connector->dev; struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa; u32 adpa;
bool ret; bool ret;
u32 save_adpa; u32 save_adpa;
save_adpa = adpa = I915_READ(ADPA); save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
I915_WRITE(ADPA, adpa); I915_WRITE(crt->adpa_reg, adpa);
if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000)) { 1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
I915_WRITE(ADPA, save_adpa); I915_WRITE(crt->adpa_reg, save_adpa);
} }
/* Check the status to see if both blue and green are on now */ /* Check the status to see if both blue and green are on now */
adpa = I915_READ(ADPA); adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true; ret = true;
else else
...@@ -665,11 +666,11 @@ static void intel_crt_reset(struct drm_connector *connector) ...@@ -665,11 +666,11 @@ static void intel_crt_reset(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev)) { if (HAS_PCH_SPLIT(dev)) {
u32 adpa; u32 adpa;
adpa = I915_READ(PCH_ADPA); adpa = I915_READ(crt->adpa_reg);
adpa &= ~ADPA_CRT_HOTPLUG_MASK; adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS; adpa |= ADPA_HOTPLUG_BITS;
I915_WRITE(PCH_ADPA, adpa); I915_WRITE(crt->adpa_reg, adpa);
POSTING_READ(PCH_ADPA); POSTING_READ(crt->adpa_reg);
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1; crt->force_hotplug_required = 1;
......
...@@ -677,6 +677,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder, ...@@ -677,6 +677,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n", DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
port_name(port), pipe_name(pipe)); port_name(port), pipe_name(pipe));
intel_crtc->eld_vld = false;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
...@@ -987,7 +988,13 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc) ...@@ -987,7 +988,13 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
if (cpu_transcoder == TRANSCODER_EDP) { if (cpu_transcoder == TRANSCODER_EDP) {
switch (pipe) { switch (pipe) {
case PIPE_A: case PIPE_A:
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; /* Can only use the always-on power well for eDP when
* not using the panel fitter, and when not using motion
* blur mitigation (which we don't support). */
if (dev_priv->pch_pf_size)
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
break; break;
case PIPE_B: case PIPE_B:
temp |= TRANS_DDI_EDP_INPUT_B_ONOFF; temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
...@@ -1287,10 +1294,14 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) ...@@ -1287,10 +1294,14 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
static void intel_enable_ddi(struct intel_encoder *intel_encoder) static void intel_enable_ddi(struct intel_encoder *intel_encoder)
{ {
struct drm_encoder *encoder = &intel_encoder->base; struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder); enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type; int type = intel_encoder->type;
uint32_t tmp;
if (type == INTEL_OUTPUT_HDMI) { if (type == INTEL_OUTPUT_HDMI) {
/* In HDMI/DVI mode, the port width, and swing/emphasis values /* In HDMI/DVI mode, the port width, and swing/emphasis values
...@@ -1303,18 +1314,34 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) ...@@ -1303,18 +1314,34 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
ironlake_edp_backlight_on(intel_dp); ironlake_edp_backlight_on(intel_dp);
} }
if (intel_crtc->eld_vld) {
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}
} }
static void intel_disable_ddi(struct intel_encoder *intel_encoder) static void intel_disable_ddi(struct intel_encoder *intel_encoder)
{ {
struct drm_encoder *encoder = &intel_encoder->base; struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int type = intel_encoder->type; int type = intel_encoder->type;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
if (type == INTEL_OUTPUT_EDP) { if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_backlight_off(intel_dp); ironlake_edp_backlight_off(intel_dp);
} }
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
} }
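Both ELD hunks poke the same per-pipe field of HSW_AUD_PIN_ELD_CP_VLD: the register packs one nibble per pipe, which is why the pipe-A bit names are shifted by pipe * 4. In comment form (bit meanings inferred from the names, an assumption beyond what the diff states):

/* HSW_AUD_PIN_ELD_CP_VLD, nibble at (pipe * 4) for pipes A/B/C:
 *   AUDIO_ELD_VALID_A      - the ELD written for this pipe is valid
 *   AUDIO_OUTPUT_ENABLE_A  - audio output enabled on this pipe
 * enable sets both bits for the active pipe; disable clears them.
 */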
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
......