Commit 9712def2 authored by Dave Airlie

Merge tag 'drm-intel-next-2013-08-09' of...

Merge tag 'drm-intel-next-2013-08-09' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
New pile of stuff for -next:
- Cleanup of the old crtc helper callbacks; all encoders are now converted
  to the i915 modeset infrastructure.
- Massive amount of wm patches from Ville for ilk, snb, ivb and hsw; this is
  prep work to eventually get things going for nuclear pageflips, where we
  need to adjust watermarks on the fly.
- More vm/vma patches from Ben. This refactoring isn't fully rolled out yet;
  we're still missing the execbuf conversion and some of the low-level
  bind/unbind support code.
- Convert our hdmi infoframe code to use the new common helper functions
  (Damien). This contains some bugfixes for the common infoframe helpers.
- Some cruft removal from Damien.
- Various smaller bits&pieces all over, as usual.

* tag 'drm-intel-next-2013-08-09' of git://people.freedesktop.org/~danvet/drm-intel: (105 commits)
  drm/i915: Fix FB WM for HSW
  drm/i915: expose HDMI connectors on port C on BYT
  drm/i915: fix a limit check in hsw_compute_wm_results()
  drm/i915: unbreak i915_gem_object_ggtt_unbind()
  drm/i915: Make intel_set_mode() static
  drm/i915: Remove intel_modeset_disable()
  drm/i915: Make intel_encoder_dpms() static
  drm/i915: Make i915_hangcheck_elapsed() static
  drm/i915: Fix #endif comment
  drm/i915: Remove i915_gem_object_check_coherency()
  drm/i915: Remove stale prototypes
  drm/i915: List objects allocated from stolen memory in debugfs
  drm/i915: Always call intel_update_sprite_watermarks() when disabling a plane
  drm/i915: Pass plane and crtc to intel_update_sprite_watermarks
  drm/i915: Don't try to disable plane if it's already disabled
  drm/i915: Pass crtc to our update/disable_plane hooks
  drm/i915: Split plane watermark parameters into a separate struct
  drm/i915: Pull some watermarks state into a separate structure
  drm/i915: Calculate max watermark levels for ILK+
  drm/i915: Rename hsw_lp_wm_result to intel_wm_level
  ...
parents 66cc8b6b 5c536613
......@@ -3102,11 +3102,13 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
if (err < 0)
return err;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
frame->video_code = drm_match_cea_mode(mode);
if (!frame->video_code)
return 0;
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
frame->active_info_valid = 1;
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
return 0;
......
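For context on the infoframe bullet above: with the common helpers a driver builds the AVI infoframe from the display mode and then packs it into the raw bytes that get written to hardware. A minimal sketch, assuming the drm_edid.c helper touched in this hunk together with the <linux/hdmi.h> packing helper (the function name here is illustrative, not an actual i915 call site):

#include <linux/hdmi.h>
#include <drm/drm_edid.h>

/* Sketch: derive and serialize an AVI infoframe for a given mode. */
static ssize_t example_pack_avi_infoframe(const struct drm_display_mode *mode,
                                          void *buf, size_t len)
{
        struct hdmi_avi_infoframe frame;
        int ret;

        /* Fills video_code, pixel_repeat, picture/active aspect, ... */
        ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
        if (ret < 0)
                return ret;

        /* Header, payload and checksum end up in buf. */
        return hdmi_avi_infoframe_pack(&frame, buf, len);
}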
......@@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
......@@ -89,13 +90,20 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
}
}
static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
return obj->has_global_gtt_mapping ? "g" : " ";
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
struct i915_vma *vma;
seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
......@@ -111,9 +119,14 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (pinned x %d)", obj->pin_count);
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
if (i915_gem_obj_ggtt_bound(obj))
seq_printf(m, " (gtt offset: %08lx, size: %08x)",
i915_gem_obj_ggtt_offset(obj), (unsigned int)i915_gem_obj_ggtt_size(obj));
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!i915_is_ggtt(vma->vm))
seq_puts(m, " (pp");
else
seq_puts(m, " (g");
seq_printf(m, "gtt offset: %08lx, size: %08lx)",
vma->node.start, vma->node.size);
}
if (obj->stolen)
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
if (obj->pin_mappable || obj->fault_mappable) {
......@@ -137,7 +150,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
size_t total_obj_size, total_gtt_size;
int count, ret;
......@@ -145,6 +158,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
if (ret)
return ret;
/* FIXME: the user of this interface might want more than just GGTT */
switch (list) {
case ACTIVE_LIST:
seq_puts(m, "Active:\n");
......@@ -160,14 +174,75 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, head, mm_list) {
seq_puts(m, " ");
describe_obj(m, obj);
seq_putc(m, '\n');
list_for_each_entry(vma, head, mm_list) {
seq_printf(m, " ");
describe_obj(m, vma->obj);
seq_printf(m, "\n");
total_obj_size += vma->obj->base.size;
total_gtt_size += vma->node.size;
count++;
}
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
}
static int obj_rank_by_stolen(void *priv,
struct list_head *A, struct list_head *B)
{
struct drm_i915_gem_object *a =
container_of(A, struct drm_i915_gem_object, exec_list);
struct drm_i915_gem_object *b =
container_of(B, struct drm_i915_gem_object, exec_list);
return a->stolen->start - b->stolen->start;
}
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
LIST_HEAD(stolen);
int count, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->stolen == NULL)
continue;
list_add(&obj->exec_list, &stolen);
total_obj_size += obj->base.size;
total_gtt_size += i915_gem_obj_ggtt_size(obj);
count++;
}
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
if (obj->stolen == NULL)
continue;
list_add(&obj->exec_list, &stolen);
total_obj_size += obj->base.size;
count++;
}
list_sort(NULL, &stolen, obj_rank_by_stolen);
seq_puts(m, "Stolen:\n");
while (!list_empty(&stolen)) {
obj = list_first_entry(&stolen, typeof(*obj), exec_list);
seq_puts(m, " ");
describe_obj(m, obj);
seq_putc(m, '\n');
list_del_init(&obj->exec_list);
}
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
......@@ -212,7 +287,18 @@ static int per_file_stats(int id, void *ptr, void *data)
return 0;
}
static int i915_gem_object_info(struct seq_file *m, void *data)
#define count_vmas(list, member) do { \
list_for_each_entry(vma, list, member) { \
size += i915_gem_obj_ggtt_size(vma->obj); \
++count; \
if (vma->obj->map_and_fenceable) { \
mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
++mappable_count; \
} \
} \
} while (0)
static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
......@@ -222,6 +308,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
struct drm_i915_gem_object *obj;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_file *file;
struct i915_vma *vma;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
......@@ -238,12 +325,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_objects(&vm->active_list, mm_list);
count_vmas(&vm->active_list, mm_list);
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_objects(&vm->inactive_list, mm_list);
count_vmas(&vm->inactive_list, mm_list);
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
......@@ -1099,6 +1186,12 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
} else {
seq_puts(m, "FBC disabled: ");
switch (dev_priv->fbc.no_fbc_reason) {
case FBC_OK:
seq_puts(m, "FBC actived, but currently disabled in hardware");
break;
case FBC_UNSUPPORTED:
seq_puts(m, "unsupported by this chipset");
break;
case FBC_NO_OUTPUT:
seq_puts(m, "no outputs");
break;
......@@ -1756,7 +1849,8 @@ i915_drop_caches_set(void *data, u64 val)
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct i915_address_space *vm;
struct i915_vma *vma, *x;
int ret;
DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
......@@ -1777,13 +1871,17 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_retire_requests(dev);
if (val & DROP_BOUND) {
list_for_each_entry_safe(obj, next, &vm->inactive_list,
mm_list)
if (obj->pin_count == 0) {
ret = i915_gem_object_unbind(obj);
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
list_for_each_entry_safe(vma, x, &vm->inactive_list,
mm_list) {
if (vma->obj->pin_count)
continue;
ret = i915_vma_unbind(vma);
if (ret)
goto unlock;
}
}
}
if (val & DROP_UNBOUND) {
......@@ -2078,6 +2176,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
{"i915_gem_stolen", i915_gem_stolen_list_info },
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
......@@ -2110,7 +2209,7 @@ static struct drm_info_list i915_debugfs_list[] = {
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
struct i915_debugfs_files {
static struct i915_debugfs_files {
const char *name;
const struct file_operations *fops;
} i915_debugfs_files[] = {
......
......@@ -1485,10 +1485,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
i915_dump_device_info(dev_priv);
INIT_LIST_HEAD(&dev_priv->vm_list);
INIT_LIST_HEAD(&dev_priv->gtt.base.global_link);
list_add(&dev_priv->gtt.base.global_link, &dev_priv->vm_list);
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
......
......@@ -201,7 +201,6 @@ struct intel_ddi_plls {
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0
#define WATCH_COHERENCY 0
#define WATCH_LISTS 0
#define WATCH_GTT 0
......@@ -323,8 +322,8 @@ struct drm_i915_error_state {
u32 purgeable:1;
s32 ring:4;
u32 cache_level:2;
} *active_bo, *pinned_bo;
u32 active_bo_count, pinned_bo_count;
} **active_bo, **pinned_bo;
u32 *active_bo_count, *pinned_bo_count;
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
};
......@@ -359,9 +358,10 @@ struct drm_i915_display_funcs {
struct dpll *match_clock,
struct dpll *best_clock);
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enable);
bool enable, bool scaled);
void (*modeset_global_resources)(struct drm_device *dev);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
......@@ -449,8 +449,11 @@ struct intel_device_info {
enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC,
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specifc
caches, eg sampler/render caches, and the
large Last-Level-Cache. LLC is coherent with
the CPU, but L3 is only visible to the GPU. */
};
typedef uint32_t gen6_gtt_pte_t;
......@@ -542,7 +545,12 @@ struct i915_hw_ppgtt {
int (*enable)(struct drm_device *dev);
};
/* To make things as simple as possible (ie. no refcounting), a VMA's lifetime
/**
* A VMA represents a GEM BO that is bound into an address space. Therefore, a
* VMA's presence cannot be guaranteed before binding, or after unbinding the
* object into/from the address space.
*
* To make things as simple as possible (ie. no refcounting), a VMA's lifetime
* will always be <= an objects lifetime. So object refcounting should cover us.
*/
struct i915_vma {
......@@ -550,6 +558,9 @@ struct i915_vma {
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
/** This object's place on the active/inactive lists */
struct list_head mm_list;
struct list_head vma_link; /* Link in the object's VMA list */
};
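To spell out how these two list heads are used: a VMA sits on its address space's active/inactive lists through mm_list, and on the owning object's vma_list through vma_link, so finding the VMA for a given object/VM pair is a short list walk. A sketch of that lookup (the series adds i915_gem_obj_to_vma() for this; the function name below is illustrative):

static struct i915_vma *
example_obj_to_vma(struct drm_i915_gem_object *obj,
                   struct i915_address_space *vm)
{
        struct i915_vma *vma;

        /* At most one VMA per (object, address space) pair. */
        list_for_each_entry(vma, &obj->vma_list, vma_link)
                if (vma->vm == vm)
                        return vma;

        return NULL;
}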
......@@ -589,7 +600,9 @@ struct i915_fbc {
int interval;
} *fbc_work;
enum {
enum no_fbc_reason {
FBC_OK, /* FBC is enabled */
FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
FBC_NO_OUTPUT, /* no outputs enabled to compress */
FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
......@@ -1045,6 +1058,19 @@ struct intel_vbt_data {
struct child_device_config *child_dev;
};
enum intel_ddb_partitioning {
INTEL_DDB_PART_1_2,
INTEL_DDB_PART_5_6, /* IVB+ */
};
struct intel_wm_level {
bool enable;
uint32_t pri_val;
uint32_t spr_val;
uint32_t cur_val;
uint32_t fbc_val;
};
typedef struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
......@@ -1214,6 +1240,20 @@ typedef struct drm_i915_private {
struct i915_suspend_saved_registers regfile;
struct {
/*
* Raw watermark latency values:
* in 0.1us units for WM0,
* in 0.5us units for WM1+.
*/
/* primary */
uint16_t pri_latency[5];
/* sprite */
uint16_t spr_latency[5];
/* cursor */
uint16_t cur_latency[5];
} wm;
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
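To make the unit comment in the new wm block concrete: level 0 counts in 100 ns steps and the higher levels in 500 ns steps, so a hypothetical conversion helper (illustrative only, not part of the patch) would read:

/* Convert a raw latency entry to nanoseconds, per the unit comment above. */
static unsigned int wm_latency_to_ns(const uint16_t latency[5], int level)
{
        return latency[level] * (level == 0 ? 100 : 500);
}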
......@@ -1221,6 +1261,11 @@ typedef struct drm_i915_private {
struct i915_ums_state ums;
} drm_i915_private_t;
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
return dev->dev_private;
}
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
......@@ -1265,9 +1310,7 @@ struct drm_i915_gem_object {
struct drm_mm_node *stolen;
struct list_head global_list;
/** This object's place on the active/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
......@@ -1386,52 +1429,6 @@ struct drm_i915_gem_object {
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
/* This is a temporary define to help transition us to real VMAs. If you see
* this, you're either reviewing code, or bisecting it. */
static inline struct i915_vma *
__i915_gem_obj_to_vma(struct drm_i915_gem_object *obj)
{
if (list_empty(&obj->vma_list))
return NULL;
return list_first_entry(&obj->vma_list, struct i915_vma, vma_link);
}
/* Whether or not this object is currently mapped by the translation tables */
static inline bool
i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
{
struct i915_vma *vma = __i915_gem_obj_to_vma(o);
if (vma == NULL)
return false;
return drm_mm_node_allocated(&vma->node);
}
/* Offset of the first PTE pointing to this object */
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
{
BUG_ON(list_empty(&o->vma_list));
return __i915_gem_obj_to_vma(o)->node.start;
}
/* The size used in the translation tables may be larger than the actual size of
* the object on GEN2/GEN3 because of the way tiling is handled. See
* i915_gem_get_gtt_size() for more details.
*/
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
BUG_ON(list_empty(&o->vma_list));
return __i915_gem_obj_to_vma(o)->node.size;
}
static inline void
i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
enum i915_cache_level color)
{
__i915_gem_obj_to_vma(o)->node.color = color;
}
/**
* Request queue structure.
*
......@@ -1482,7 +1479,7 @@ struct drm_i915_file_private {
struct i915_ctx_hang_stats hang_stats;
};
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
#define INTEL_INFO(dev) (to_i915(dev)->info)
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
......@@ -1576,7 +1573,7 @@ struct drm_i915_file_private {
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
......@@ -1668,7 +1665,6 @@ extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
......@@ -1678,7 +1674,6 @@ extern void intel_pm_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_reset(struct drm_device *dev);
extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
......@@ -1749,11 +1744,13 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
......@@ -1842,9 +1839,6 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
......@@ -1897,6 +1891,53 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
void i915_gem_restore_fences(struct drm_device *dev);
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
struct i915_address_space *ggtt =
&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
return vm == ggtt;
}
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
}
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
}
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_size(obj, obj_to_ggtt(obj));
}
static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking)
{
return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
map_and_fenceable, nonblocking);
}
#undef obj_to_ggtt
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
......@@ -1949,7 +1990,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
int __must_check i915_gem_evict_something(struct drm_device *dev,
struct i915_address_space *vm,
int min_size,
unsigned alignment,
unsigned cache_level,
bool mappable,
......@@ -1971,7 +2014,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
......@@ -1984,17 +2027,11 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
int handle);
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
......
......@@ -155,7 +155,7 @@ create_hw_context(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 7) {
ret = i915_gem_object_set_cache_level(ctx->obj,
I915_CACHE_LLC_MLC);
I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret))
goto err_out;
......@@ -214,7 +214,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* default context.
*/
dev_priv->ring[RCS].default_context = ctx;
ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy;
......@@ -400,7 +400,7 @@ static int do_switch(struct i915_hw_context *to)
if (from == to)
return 0;
ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
......@@ -436,7 +436,10 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
i915_gem_object_move_to_active(from->obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
......
......@@ -115,73 +115,4 @@ i915_verify_lists(struct drm_device *dev)
return warned = err;
}
#endif /* WATCH_INACTIVE */
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
struct drm_device *dev = obj->base.dev;
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
__func__, obj, obj->gtt_offset, handle,
obj->size / 1024);
gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
obj->base.size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
}
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
backing_map = kmap_atomic(obj->pages[page]);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
goto out;
}
for (i = 0; i < PAGE_SIZE / 4; i++) {
uint32_t cpuval = backing_map[i];
uint32_t gttval = readl(gtt_mapping +
page * 1024 + i);
if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n",
(int)(obj->gtt_offset +
page * PAGE_SIZE + i * 4),
cpuval, gttval);
if (bad_count++ >= 8) {
DRM_INFO("...\n");
goto out;
}
}
}
kunmap_atomic(backing_map);
backing_map = NULL;
}
out:
if (backing_map != NULL)
kunmap_atomic(backing_map);
iounmap(gtt_mapping);
/* give syslog time to catch up */
msleep(1);
/* Directly flush the object, since we just loaded values with the CPU
* from the backing pages and we don't want to disturb the cache
* management that we're trying to observe.
*/
i915_gem_clflush_object(obj);
}
#endif
#endif /* WATCH_LIST */
......@@ -32,24 +32,21 @@
#include "i915_trace.h"
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
if (obj->pin_count)
if (vma->obj->pin_count)
return false;
list_add(&obj->exec_list, unwind);
list_add(&vma->obj->exec_list, unwind);
return drm_mm_scan_add_block(&vma->node);
}
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, unsigned cache_level,
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
int min_size, unsigned alignment, unsigned cache_level,
bool mappable, bool nonblocking)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct list_head eviction_list, unwind_list;
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
......@@ -81,16 +78,17 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
*/
INIT_LIST_HEAD(&unwind_list);
if (mappable)
if (mappable) {
BUG_ON(!i915_is_ggtt(vm));
drm_mm_init_scan_with_range(&vm->mm, min_size,
alignment, cache_level, 0,
dev_priv->gtt.mappable_end);
else
} else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &vm->inactive_list, mm_list) {
if (mark_free(obj, &unwind_list))
list_for_each_entry(vma, &vm->inactive_list, mm_list) {
if (mark_free(vma, &unwind_list))
goto found;
}
......@@ -98,8 +96,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto none;
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &vm->active_list, mm_list) {
if (mark_free(obj, &unwind_list))
list_for_each_entry(vma, &vm->active_list, mm_list) {
if (mark_free(vma, &unwind_list))
goto found;
}
......@@ -109,7 +107,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
vma = __i915_gem_obj_to_vma(obj);
vma = i915_gem_obj_to_vma(obj, vm);
ret = drm_mm_scan_remove_block(&vma->node);
BUG_ON(ret);
......@@ -130,7 +128,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
vma = __i915_gem_obj_to_vma(obj);
vma = i915_gem_obj_to_vma(obj, vm);
if (drm_mm_scan_remove_block(&vma->node)) {
list_move(&obj->exec_list, &eviction_list);
drm_gem_object_reference(&obj->base);
......@@ -145,7 +143,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
struct drm_i915_gem_object,
exec_list);
if (ret == 0)
ret = i915_gem_object_unbind(obj);
ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
......@@ -158,13 +156,18 @@ int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj, *next;
bool lists_empty;
struct i915_address_space *vm;
struct i915_vma *vma, *next;
bool lists_empty = true;
int ret;
lists_empty = (list_empty(&vm->inactive_list) &&
list_empty(&vm->active_list));
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
lists_empty = (list_empty(&vm->inactive_list) &&
list_empty(&vm->active_list));
if (!lists_empty)
lists_empty = false;
}
if (lists_empty)
return -ENOSPC;
......@@ -181,9 +184,11 @@ i915_gem_evict_everything(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
if (obj->pin_count == 0)
WARN_ON(i915_gem_object_unbind(obj));
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
if (vma->obj->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));
}
return 0;
}
......@@ -174,7 +174,8 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *reloc)
struct drm_i915_gem_relocation_entry *reloc,
struct i915_address_space *vm)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
......@@ -297,7 +298,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
struct eb_objects *eb,
struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
......@@ -321,7 +323,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
do {
u64 offset = r->presumed_offset;
ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
vm);
if (ret)
return ret;
......@@ -344,13 +347,15 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *relocs)
struct drm_i915_gem_relocation_entry *relocs,
struct i915_address_space *vm)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
vm);
if (ret)
return ret;
}
......@@ -359,7 +364,8 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
}
static int
i915_gem_execbuffer_relocate(struct eb_objects *eb)
i915_gem_execbuffer_relocate(struct eb_objects *eb,
struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
int ret = 0;
......@@ -373,7 +379,7 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb)
*/
pagefault_disable();
list_for_each_entry(obj, &eb->objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
if (ret)
break;
}
......@@ -395,6 +401,7 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
struct i915_address_space *vm,
bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
......@@ -409,7 +416,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
false);
if (ret)
return ret;
......@@ -436,8 +444,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
if (entry->offset != i915_gem_obj_ggtt_offset(obj)) {
entry->offset = i915_gem_obj_ggtt_offset(obj);
if (entry->offset != i915_gem_obj_offset(obj, vm)) {
entry->offset = i915_gem_obj_offset(obj, vm);
*need_reloc = true;
}
......@@ -458,7 +466,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
struct drm_i915_gem_exec_object2 *entry;
if (!i915_gem_obj_ggtt_bound(obj))
if (!i915_gem_obj_bound_any(obj))
return;
entry = obj->exec_entry;
......@@ -475,6 +483,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *objects,
struct i915_address_space *vm,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
......@@ -529,32 +538,37 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
u32 obj_offset;
if (!i915_gem_obj_ggtt_bound(obj))
if (!i915_gem_obj_bound(obj, vm))
continue;
obj_offset = i915_gem_obj_offset(obj, vm);
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
WARN_ON((need_mappable || need_fence) &&
!i915_is_ggtt(vm));
if ((entry->alignment &&
i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) ||
obj_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
else
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
if (i915_gem_obj_ggtt_bound(obj))
if (i915_gem_obj_bound(obj, vm))
continue;
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
......@@ -578,7 +592,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec)
struct drm_i915_gem_exec_object2 *exec,
struct i915_address_space *vm)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
......@@ -662,14 +677,15 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]);
reloc + reloc_offset[offset],
vm);
if (ret)
goto err;
}
......@@ -770,6 +786,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct i915_address_space *vm,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
......@@ -784,6 +801,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
/* FIXME: This lookup gets fixed later <-- danvet */
list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
......@@ -838,7 +857,8 @@ static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
struct drm_i915_gem_exec_object2 *exec,
struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct eb_objects *eb;
......@@ -1000,17 +1020,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
if (need_relocs)
ret = i915_gem_execbuffer_relocate(eb);
ret = i915_gem_execbuffer_relocate(eb, vm);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
eb, exec);
eb, exec, vm);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
......@@ -1061,7 +1081,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset;
exec_start = i915_gem_obj_offset(batch_obj, vm) +
args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
......@@ -1086,7 +1107,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(&eb->objects, ring);
i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err:
......@@ -1107,6 +1128,7 @@ int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
......@@ -1162,7 +1184,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.flags = I915_EXEC_RENDER;
i915_execbuffer2_set_context_id(exec2, 0);
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
&dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
......@@ -1188,6 +1211,7 @@ int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
......@@ -1218,7 +1242,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EFAULT;
}
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
&dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user(to_user_ptr(args->buffers_ptr),
......
......@@ -43,7 +43,7 @@
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
......@@ -52,18 +52,40 @@
*/
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
(((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
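For reference, the cacheability macro above spreads a 4-bit field into PTE bits [3:1] and bit 11, so the three defines expand to (just restating the macro arithmetic):

/*
 * HSW_CACHEABILITY_CONTROL(0xb): (0xb & 0x7) << 1 = 0x006  -> PTE bits [3:1]
 *                                (0xb & 0x8) << 8 = 0x800  -> PTE bit 11
 * so HSW_WB_ELLC_LLC_AGE0 == 0x806, HSW_WB_LLC_AGE0 (0x3) == 0x006 and
 * HSW_WB_LLC_AGE3 (0x2) == 0x004.
 */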
static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
case I915_CACHE_L3_LLC:
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
pte |= GEN6_PTE_UNCACHED;
break;
default:
WARN_ON(1);
}
return pte;
}
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
case I915_CACHE_LLC_MLC:
pte |= GEN6_PTE_CACHE_LLC_MLC;
case I915_CACHE_L3_LLC:
pte |= GEN7_PTE_CACHE_L3_LLC;
break;
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
......@@ -72,7 +94,7 @@ static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr,
pte |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
WARN_ON(1);
}
return pte;
......@@ -105,7 +127,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
pte |= HSW_WB_LLC_AGE0;
pte |= HSW_WB_LLC_AGE3;
return pte;
}
......@@ -298,13 +320,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
* now. */
first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
if (IS_HASWELL(dev)) {
ppgtt->base.pte_encode = hsw_pte_encode;
} else if (IS_VALLEYVIEW(dev)) {
ppgtt->base.pte_encode = byt_pte_encode;
} else {
ppgtt->base.pte_encode = gen6_pte_encode;
}
ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
ppgtt->enable = gen6_ppgtt_enable;
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
......@@ -648,7 +664,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
......@@ -656,19 +673,19 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
BUG_ON(mappable_end > end);
/* Subtract the guard page ... */
drm_mm_init(&dev_priv->gtt.base.mm, start, end - start - PAGE_SIZE);
drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
int ret;
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
i915_gem_obj_ggtt_offset(obj), obj->base.size);
WARN_ON(i915_gem_obj_ggtt_bound(obj));
ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
......@@ -679,19 +696,15 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
dev_priv->gtt.base.total = end - start;
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &dev_priv->gtt.base.mm,
hole_start, hole_end) {
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
hole_start / PAGE_SIZE,
count);
ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
}
/* And finally clear the reserved guard page */
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
end / PAGE_SIZE - 1, 1);
ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
}
static bool
......@@ -898,8 +911,10 @@ int i915_gem_gtt_init(struct drm_device *dev)
gtt->base.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev))
gtt->base.pte_encode = byt_pte_encode;
else if (INTEL_INFO(dev)->gen >= 7)
gtt->base.pte_encode = ivb_pte_encode;
else
gtt->base.pte_encode = gen6_pte_encode;
gtt->base.pte_encode = snb_pte_encode;
}
ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
......
......@@ -349,7 +349,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
......@@ -393,7 +393,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
vma = i915_gem_vma_create(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;
......@@ -406,8 +406,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
*/
vma->node.start = gtt_offset;
vma->node.size = size;
if (drm_mm_initialized(&dev_priv->gtt.base.mm)) {
ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
if (drm_mm_initialized(&ggtt->mm)) {
ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
i915_gem_vma_destroy(vma);
......@@ -418,7 +418,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
obj->has_global_gtt_mapping = 1;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &vm->inactive_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
return obj;
......
......@@ -360,17 +360,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj->map_and_fenceable =
!i915_gem_obj_ggtt_bound(obj) ||
(i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end &&
(i915_gem_obj_ggtt_offset(obj) +
obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
u32 unfenced_align =
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
ret = i915_gem_object_ggtt_unbind(obj);
}
if (ret == 0) {
......
......@@ -304,13 +304,13 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (error->active_bo)
print_error_buffers(m, "Active",
error->active_bo,
error->active_bo_count);
error->active_bo[0],
error->active_bo_count[0]);
if (error->pinned_bo)
print_error_buffers(m, "Pinned",
error->pinned_bo,
error->pinned_bo_count);
error->pinned_bo[0],
error->pinned_bo_count[0]);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
struct drm_i915_error_object *obj;
......@@ -556,11 +556,11 @@ static void capture_bo(struct drm_i915_error_buffer *err,
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
int count, struct list_head *head)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int i = 0;
list_for_each_entry(obj, head, mm_list) {
capture_bo(err++, obj);
list_for_each_entry(vma, head, mm_list) {
capture_bo(err++, vma->obj);
if (++i == count)
break;
}
......@@ -622,7 +622,8 @@ static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
struct i915_address_space *vm = &dev_priv->gtt.base;
struct i915_address_space *vm;
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
u32 seqno;
......@@ -642,20 +643,23 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
}
seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &vm->active_list, mm_list) {
if (obj->ring != ring)
continue;
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
list_for_each_entry(vma, &vm->active_list, mm_list) {
obj = vma->obj;
if (obj->ring != ring)
continue;
if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
continue;
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
continue;
/* We need to copy these to an anonymous buffer as the simplest
* method to avoid being overwritten by userspace.
*/
return i915_error_object_create(dev_priv, obj);
/* We need to copy these to an anonymous buffer as the simplest
* method to avoid being overwritten by userspace.
*/
return i915_error_object_create(dev_priv, obj);
}
}
return NULL;
......@@ -771,41 +775,72 @@ static void i915_gem_record_rings(struct drm_device *dev,
}
}
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
* VM.
*/
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
struct i915_address_space *vm,
const int ndx)
{
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int i;
i = 0;
list_for_each_entry(obj, &vm->active_list, mm_list)
list_for_each_entry(vma, &vm->active_list, mm_list)
i++;
error->active_bo_count = i;
error->active_bo_count[ndx] = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
i++;
error->pinned_bo_count = i - error->active_bo_count;
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
if (i) {
error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
GFP_ATOMIC);
if (error->active_bo)
error->pinned_bo =
error->active_bo + error->active_bo_count;
active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
if (active_bo)
pinned_bo = active_bo + error->active_bo_count[ndx];
}
if (error->active_bo)
error->active_bo_count =
capture_active_bo(error->active_bo,
error->active_bo_count,
if (active_bo)
error->active_bo_count[ndx] =
capture_active_bo(active_bo,
error->active_bo_count[ndx],
&vm->active_list);
if (error->pinned_bo)
error->pinned_bo_count =
capture_pinned_bo(error->pinned_bo,
error->pinned_bo_count,
if (pinned_bo)
error->pinned_bo_count[ndx] =
capture_pinned_bo(pinned_bo,
error->pinned_bo_count[ndx],
&dev_priv->mm.bound_list);
error->active_bo[ndx] = active_bo;
error->pinned_bo[ndx] = pinned_bo;
}
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
struct i915_address_space *vm;
int cnt = 0, i = 0;
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
cnt++;
if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
cnt = 1;
vm = &dev_priv->gtt.base;
error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
GFP_ATOMIC);
error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
GFP_ATOMIC);
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
i915_gem_capture_vm(dev_priv, error, vm, i++);
}
/**
......@@ -938,8 +973,8 @@ const char *i915_cache_level_str(int type)
{
switch (type) {
case I915_CACHE_NONE: return " uncached";
case I915_CACHE_LLC: return " snooped (LLC)";
case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
case I915_CACHE_LLC: return " snooped or LLC";
case I915_CACHE_L3_LLC: return " L3+LLC";
default: return "";
}
}
......
......@@ -1869,7 +1869,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
* we kick the ring. If we see no progress on three subsequent calls
* we assume chip is wedged and try to fix it by resetting the chip.
*/
void i915_hangcheck_elapsed(unsigned long data)
static void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
......
......@@ -3196,9 +3196,6 @@
#define MLTR_WM2_SHIFT 8
/* the unit of memory self-refresh latency time is 0.5us */
#define ILK_SRLT_MASK 0x3f
#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
/* define the fifo size on Ironlake */
#define ILK_DISPLAY_FIFO 128
......@@ -3245,12 +3242,6 @@
#define SSKPD_WM2_SHIFT 16
#define SSKPD_WM3_SHIFT 24
#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
......
......@@ -33,47 +33,52 @@ TRACE_EVENT(i915_gem_object_create,
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);
TRACE_EVENT(i915_gem_object_bind,
TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
TP_ARGS(obj, mappable),
TRACE_EVENT(i915_vma_bind,
TP_PROTO(struct i915_vma *vma, bool mappable),
TP_ARGS(vma, mappable),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
__field(bool, mappable)
),
TP_fast_assign(
__entry->obj = obj;
__entry->offset = i915_gem_obj_ggtt_offset(obj);
__entry->size = i915_gem_obj_ggtt_size(obj);
__entry->obj = vma->obj;
__entry->vm = vma->vm;
__entry->offset = vma->node.start;
__entry->size = vma->node.size;
__entry->mappable = mappable;
),
TP_printk("obj=%p, offset=%08x size=%x%s",
TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
__entry->obj, __entry->offset, __entry->size,
__entry->mappable ? ", mappable" : "")
__entry->mappable ? ", mappable" : "",
__entry->vm)
);
TRACE_EVENT(i915_gem_object_unbind,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TRACE_EVENT(i915_vma_unbind,
TP_PROTO(struct i915_vma *vma),
TP_ARGS(vma),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = obj;
__entry->offset = i915_gem_obj_ggtt_offset(obj);
__entry->size = i915_gem_obj_ggtt_size(obj);
__entry->obj = vma->obj;
__entry->vm = vma->vm;
__entry->offset = vma->node.start;
__entry->size = vma->node.size;
),
TP_printk("obj=%p, offset=%08x size=%x",
__entry->obj, __entry->offset, __entry->size)
TP_printk("obj=%p, offset=%08x size=%x vm=%p",
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
TRACE_EVENT(i915_gem_object_change_domain,
......
......@@ -28,7 +28,7 @@ static const u8 intel_dsm_guid[] = {
0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
};
static int intel_dsm(acpi_handle handle, int func, int arg)
static int intel_dsm(acpi_handle handle, int func)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
......@@ -46,8 +46,9 @@ static int intel_dsm(acpi_handle handle, int func, int arg)
params[1].integer.value = INTEL_DSM_REVISION_ID;
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = func;
params[3].type = ACPI_TYPE_INTEGER;
params[3].integer.value = arg;
params[3].type = ACPI_TYPE_PACKAGE;
params[3].package.count = 0;
params[3].package.elements = NULL;
ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
if (ret) {
......@@ -151,8 +152,9 @@ static void intel_dsm_platform_mux_info(void)
params[1].integer.value = INTEL_DSM_REVISION_ID;
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
params[3].type = ACPI_TYPE_INTEGER;
params[3].integer.value = 0;
params[3].type = ACPI_TYPE_PACKAGE;
params[3].package.count = 0;
params[3].package.elements = NULL;
ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
&output);
......@@ -205,7 +207,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
return false;
}
ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS);
if (ret < 0) {
DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
return false;
......
......@@ -52,15 +52,14 @@ struct intel_crt {
u32 adpa_reg;
};
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
return container_of(intel_attached_encoder(connector),
struct intel_crt, base);
return container_of(encoder, struct intel_crt, base);
}
static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
{
return container_of(encoder, struct intel_crt, base);
return intel_encoder_to_crt(intel_attached_encoder(connector));
}
static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
......@@ -238,17 +237,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
return true;
}
static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_crt_mode_set(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crt *crt =
intel_encoder_to_crt(to_intel_encoder(encoder));
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->base.dev;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 adpa;
if (HAS_PCH_SPLIT(dev))
......@@ -265,14 +261,14 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (HAS_PCH_LPT(dev))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 0)
adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
I915_WRITE(BCLRPAT(crtc->pipe), 0);
I915_WRITE(crt->adpa_reg, adpa);
}
......@@ -711,10 +707,6 @@ static void intel_crt_reset(struct drm_connector *connector)
* Routines for controlling stuff on the analog port
*/
static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_set = intel_crt_mode_set,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
.dpms = intel_crt_dpms,
......@@ -804,6 +796,7 @@ void intel_crt_init(struct drm_device *dev)
crt->adpa_reg = ADPA;
crt->base.compute_config = intel_crt_compute_config;
crt->base.mode_set = intel_crt_mode_set;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
crt->base.get_config = intel_crt_get_config;
......@@ -815,7 +808,6 @@ void intel_crt_init(struct drm_device *dev)
crt->base.get_hw_state = intel_crt_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
......
......@@ -84,25 +84,17 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
bool use_fdi_mode)
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
int i;
const u32 *ddi_translations = ((use_fdi_mode) ?
const u32 *ddi_translations = (port == PORT_E) ?
hsw_ddi_translations_fdi :
hsw_ddi_translations_dp);
hsw_ddi_translations_dp;
DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
port_name(port),
use_fdi_mode ? "FDI" : "DP");
WARN((use_fdi_mode && (port != PORT_E)),
"Programming port %c in FDI mode, this probably will not work.\n",
port_name(port));
for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
for (i = 0, reg = DDI_BUF_TRANS(port);
i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i]);
reg += 4;
}
......@@ -118,14 +110,8 @@ void intel_prepare_ddi(struct drm_device *dev)
if (!HAS_DDI(dev))
return;
for (port = PORT_A; port < PORT_E; port++)
intel_prepare_ddi_buffers(dev, port, false);
/* DDI E is the suggested one to work in FDI mode, so program is as such
* by default. It will have to be re-programmed in case a digital DP
* output will be detected on it
*/
intel_prepare_ddi_buffers(dev, PORT_E, true);
for (port = PORT_A; port <= PORT_E; port++)
intel_prepare_ddi_buffers(dev, port);
}
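With the hunk above, the loop covers ports A through E and the FDI translation table is selected purely by port: only port E (the one the removed comment singles out for FDI duty towards the PCH) gets the FDI values, everything else gets the DP values. A toy sketch of that selection, not part of the patch and with placeholder table contents rather than the real buffer-translation register values:

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };

/* Placeholder tables; the real hsw_ddi_translations_* arrays hold register values. */
static const unsigned int ddi_translations_dp[]  = { 0x1, 0x2, 0x3 };
static const unsigned int ddi_translations_fdi[] = { 0x9, 0x8, 0x7 };

static const unsigned int *translations_for_port(enum port port)
{
	/* Only port E is expected to run in FDI mode; every other port gets DP values. */
	return port == PORT_E ? ddi_translations_fdi : ddi_translations_dp;
}

int main(void)
{
	for (enum port port = PORT_A; port <= PORT_E; port++)
		printf("port %c -> %s table\n", 'A' + port,
		       translations_for_port(port) == ddi_translations_fdi ? "FDI" : "DP");
	return 0;
}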
static const long hsw_ddi_buf_ctl_values[] = {
......@@ -281,25 +267,22 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DRM_ERROR("FDI link training failed!\n");
}
static void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void intel_ddi_mode_set(struct intel_encoder *encoder)
{
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
int port = intel_ddi_get_encoder_port(intel_encoder);
int pipe = intel_crtc->pipe;
int type = intel_encoder->type;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
int port = intel_ddi_get_encoder_port(encoder);
int pipe = crtc->pipe;
int type = encoder->type;
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
intel_crtc->eld_vld = false;
crtc->eld_vld = false;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
enc_to_dig_port(&encoder->base);
intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
......@@ -307,17 +290,17 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
if (intel_dp->has_audio) {
DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));
/* write eld */
DRM_DEBUG_DRIVER("DP audio: write eld information\n");
intel_write_eld(encoder, adjusted_mode);
intel_write_eld(&encoder->base, adjusted_mode);
}
intel_dp_init_link_config(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
if (intel_hdmi->has_audio) {
/* Proper support for digital audio needs a new logic
......@@ -325,14 +308,14 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
* patch bombing.
*/
DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));
/* write eld */
DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
intel_write_eld(encoder, adjusted_mode);
intel_write_eld(&encoder->base, adjusted_mode);
}
intel_hdmi->set_infoframes(encoder, adjusted_mode);
intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
}
......@@ -1311,10 +1294,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
.destroy = intel_ddi_destroy,
};
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
.mode_set = intel_ddi_mode_set,
};
void intel_ddi_init(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......@@ -1339,9 +1318,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
drm_encoder_init(dev, encoder, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->mode_set = intel_ddi_mode_set;
intel_encoder->enable = intel_enable_ddi;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
......
......@@ -50,6 +50,10 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
typedef struct {
int min, max;
} intel_range_t;
......@@ -59,7 +63,6 @@ typedef struct {
int p2_slow, p2_fast;
} intel_p2_t;
#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
......@@ -3653,8 +3656,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_update_watermarks(dev);
mutex_lock(&dev_priv->dpio_lock);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
......@@ -3665,10 +3666,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
/* VLV wants encoder enabling _before_ the pipe is up. */
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
i9xx_pfit_enable(intel_crtc);
intel_crtc_load_lut(crtc);
......@@ -3680,7 +3677,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_update_fbc(dev);
mutex_unlock(&dev_priv->dpio_lock);
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
}
static void i9xx_crtc_enable(struct drm_crtc *crtc)
......@@ -3877,16 +3875,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
}
}
void intel_modeset_disable(struct drm_device *dev)
{
struct drm_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->enabled)
intel_crtc_disable(crtc);
}
}
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
......@@ -3895,10 +3883,10 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_encoder);
}
/* Simple dpms helper for encodres with just one connector, no cloning and only
/* Simple dpms helper for encoders with just one connector, no cloning and only
* one kind of off state. It clamps all !ON modes to fully OFF and changes the
* state of the entire output pipe. */
void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
if (mode == DRM_MODE_DPMS_ON) {
encoder->connectors_active = true;
......@@ -4092,7 +4080,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
{
pipe_config->ips_enabled = i915_enable_ips &&
hsw_crtc_supports_ips(crtc) &&
pipe_config->pipe_bpp == 24;
pipe_config->pipe_bpp <= 24;
}
static int intel_crtc_compute_config(struct intel_crtc *crtc,
......@@ -4108,12 +4096,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return -EINVAL;
}
/* All interlaced capable intel hw wants timings in frames. Note though
* that intel_lvds_mode_fixup does some funny tricks with the crtc
* timings, so we need to be careful not to clobber these.*/
if (!pipe_config->timings_set)
drm_mode_set_crtcinfo(adjusted_mode, 0);
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
......@@ -6220,11 +6202,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int ret;
......@@ -6243,12 +6222,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
mode->base.id, mode->name);
if (encoder->mode_set) {
encoder->mode_set(encoder);
} else {
encoder_funcs = encoder->base.helper_private;
encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
}
encoder->mode_set(encoder);
}
return 0;
......@@ -8061,7 +8035,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc_config *pipe_config;
int plane_bpp, ret = -EINVAL;
......@@ -8082,6 +8055,19 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
(enum transcoder) to_intel_crtc(crtc)->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
/*
* Sanitize sync polarity flags based on requested ones. If neither
* positive or negative polarity is requested, treat this as meaning
* negative polarity.
*/
if (!(pipe_config->adjusted_mode.flags &
(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
if (!(pipe_config->adjusted_mode.flags &
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
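The block just added defaults any unspecified sync polarity to negative before the encoders see the mode. A standalone sketch of that sanitization (the constants are re-declared here for the sketch; treat them as illustrative rather than authoritative):

#include <stdint.h>
#include <stdio.h>

/* Re-declared locally for the sketch. */
#define DRM_MODE_FLAG_PHSYNC (1 << 0)
#define DRM_MODE_FLAG_NHSYNC (1 << 1)
#define DRM_MODE_FLAG_PVSYNC (1 << 2)
#define DRM_MODE_FLAG_NVSYNC (1 << 3)

static uint32_t sanitize_sync_flags(uint32_t flags)
{
	/* If neither polarity was requested, fall back to negative. */
	if (!(flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		flags |= DRM_MODE_FLAG_NHSYNC;
	if (!(flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		flags |= DRM_MODE_FLAG_NVSYNC;
	return flags;
}

int main(void)
{
	/* A mode that only asked for a positive hsync; vsync defaults to negative. */
	uint32_t flags = sanitize_sync_flags(DRM_MODE_FLAG_PHSYNC);

	printf("hsync %s, vsync %s\n",
	       flags & DRM_MODE_FLAG_PHSYNC ? "positive" : "negative",
	       flags & DRM_MODE_FLAG_NVSYNC ? "negative" : "positive");
	return 0;
}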
/* Compute a starting value for pipe_config->pipe_bpp taking the source
* plane pixel format and any sink constraints into account. Returns the
* source plane bpp so that dithering can be selected on mismatches
......@@ -8096,6 +8082,9 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
pipe_config->port_clock = 0;
pipe_config->pixel_multiplier = 1;
/* Fill in default crtc timings, allow encoders to overwrite them. */
drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
......@@ -8106,20 +8095,8 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
if (&encoder->new_crtc->base != crtc)
continue;
if (encoder->compute_config) {
if (!(encoder->compute_config(encoder, pipe_config))) {
DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
}
continue;
}
encoder_funcs = encoder->base.helper_private;
if (!(encoder_funcs->mode_fixup(&encoder->base,
&pipe_config->requested_mode,
&pipe_config->adjusted_mode))) {
DRM_DEBUG_KMS("Encoder fixup failed\n");
if (!(encoder->compute_config(encoder, pipe_config))) {
DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
}
}
......@@ -8759,9 +8736,9 @@ static int __intel_set_mode(struct drm_crtc *crtc,
return ret;
}
int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
static int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
{
int ret;
......@@ -9358,8 +9335,13 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
PORT_C);
}
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
......@@ -9419,13 +9401,17 @@ static void intel_setup_outputs(struct drm_device *dev)
drm_helper_move_panel_connectors_to_head(dev);
}
void intel_framebuffer_fini(struct intel_framebuffer *fb)
{
drm_framebuffer_cleanup(&fb->base);
drm_gem_object_unreference_unlocked(&fb->obj->base);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
intel_framebuffer_fini(intel_fb);
kfree(intel_fb);
}
......