diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 248a05d0291766991f4ffce6e9025a698d75ddb4..41bd2bf28f4c7634b552c552561cc7aa7684a1ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
-	amdgpu_gtt_mgr.o
+	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index b8d66670bb17eb5aa8261764f15918ebb9bde050..06192698bd96e786a9b95b24e4ee0892ce961627 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,7 +90,6 @@
 #define ENCODER_OBJECT_ID_INTERNAL_VCE            0x24
 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3        0x25
 #define ENCODER_OBJECT_ID_INTERNAL_AMCLK          0x27
-#define ENCODER_OBJECT_ID_VIRTUAL                 0x28
 
 #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO    0xFF
 
@@ -120,7 +119,6 @@
 #define CONNECTOR_OBJECT_ID_eDP                   0x14
 #define CONNECTOR_OBJECT_ID_MXM                   0x15
 #define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
-#define CONNECTOR_OBJECT_ID_VIRTUAL               0x17
 
 /* deleted */
 
@@ -149,7 +147,6 @@
 #define GRAPH_OBJECT_ENUM_ID5                     0x05
 #define GRAPH_OBJECT_ENUM_ID6                     0x06
 #define GRAPH_OBJECT_ENUM_ID7                     0x07
-#define GRAPH_OBJECT_ENUM_VIRTUAL                 0x08
 
 /****************************************************/
 /* Graphics Object ID Bit definition                */
@@ -411,10 +408,6 @@
                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                   ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
 
-#define ENCODER_VIRTUAL_ENUM_VIRTUAL            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-                                                  GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
-                                                  ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
-
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
 /****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 039b57e4644c3936bfe345426ff17754847b199b..217df2459a980550d191bdbbde7094fc5e409b94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -53,7 +53,11 @@
 #include "amdgpu_ucode.h"
 #include "amdgpu_ttm.h"
 #include "amdgpu_gds.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_vm.h"
 #include "amd_powerplay.h"
+#include "amdgpu_dpm.h"
 #include "amdgpu_acp.h"
 
 #include "gpu_scheduler.h"
@@ -97,6 +101,7 @@ extern char *amdgpu_disable_cu;
 extern int amdgpu_sclk_deep_sleep_en;
 extern char *amdgpu_virtual_display;
 extern unsigned amdgpu_pp_feature_mask;
+extern int amdgpu_vram_page_split;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
@@ -107,12 +112,6 @@ extern unsigned amdgpu_pp_feature_mask;
 #define AMDGPUFB_CONN_LIMIT			4
 #define AMDGPU_BIOS_NUM_SCRATCH			8
 
-/* max number of rings */
-#define AMDGPU_MAX_RINGS			16
-#define AMDGPU_MAX_GFX_RINGS			1
-#define AMDGPU_MAX_COMPUTE_RINGS		8
-#define AMDGPU_MAX_VCE_RINGS			3
-
 /* max number of IP instances */
 #define AMDGPU_MAX_SDMA_INSTANCES		2
 
@@ -152,8 +151,6 @@ extern unsigned amdgpu_pp_feature_mask;
 
 struct amdgpu_device;
 struct amdgpu_ib;
-struct amdgpu_vm;
-struct amdgpu_ring;
 struct amdgpu_cs_parser;
 struct amdgpu_job;
 struct amdgpu_irq_src;
@@ -198,21 +195,38 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
 bool amdgpu_is_idle(struct amdgpu_device *adev,
 		    enum amd_ip_block_type block_type);
 
+#define AMDGPU_MAX_IP_NUM 16
+
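+/* run-time status of an IP block, tracked across init/fini and reset */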
+struct amdgpu_ip_block_status {
+	bool valid;
+	bool sw;
+	bool hw;
+	bool late_initialized;
+	bool hang;
+};
+
 struct amdgpu_ip_block_version {
-	enum amd_ip_block_type type;
-	u32 major;
-	u32 minor;
-	u32 rev;
+	const enum amd_ip_block_type type;
+	const u32 major;
+	const u32 minor;
+	const u32 rev;
 	const struct amd_ip_funcs *funcs;
 };
 
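+/* an instantiated IP block: run-time status plus the static version info */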
+struct amdgpu_ip_block {
+	struct amdgpu_ip_block_status status;
+	const struct amdgpu_ip_block_version *version;
+};
+
 int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
 				enum amd_ip_block_type type,
 				u32 major, u32 minor);
 
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
-					struct amdgpu_device *adev,
-					enum amd_ip_block_type type);
+struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
+					    enum amd_ip_block_type type);
+
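+/* append an IP block version to adev->ip_blocks */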
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+			const struct amdgpu_ip_block_version *ip_block_version);
 
 /* provided by hw blocks that can move/clear data.  e.g., gfx or sdma */
 struct amdgpu_buffer_funcs {
@@ -286,47 +300,6 @@ struct amdgpu_ih_funcs {
 	void (*set_rptr)(struct amdgpu_device *adev);
 };
 
-/* provided by hw blocks that expose a ring buffer for commands */
-struct amdgpu_ring_funcs {
-	/* ring read/write ptr handling */
-	u32 (*get_rptr)(struct amdgpu_ring *ring);
-	u32 (*get_wptr)(struct amdgpu_ring *ring);
-	void (*set_wptr)(struct amdgpu_ring *ring);
-	/* validating and patching of IBs */
-	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-	/* command emit functions */
-	void (*emit_ib)(struct amdgpu_ring *ring,
-			struct amdgpu_ib *ib,
-			unsigned vm_id, bool ctx_switch);
-	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
-			   uint64_t seq, unsigned flags);
-	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
-	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
-			      uint64_t pd_addr);
-	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
-	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
-	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
-				uint32_t gds_base, uint32_t gds_size,
-				uint32_t gws_base, uint32_t gws_size,
-				uint32_t oa_base, uint32_t oa_size);
-	/* testing functions */
-	int (*test_ring)(struct amdgpu_ring *ring);
-	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
-	/* insert NOP packets */
-	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
-	/* pad the indirect buffer to the necessary number of dw */
-	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
-	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
-	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
-	/* note usage for clock and power gating */
-	void (*begin_use)(struct amdgpu_ring *ring);
-	void (*end_use)(struct amdgpu_ring *ring);
-	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
-	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
-	unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
-	unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
-};
-
 /*
  * BIOS.
  */
@@ -363,47 +336,6 @@ struct amdgpu_clock {
 	uint32_t max_pixel_clock;
 };
 
-/*
- * Fences.
- */
-struct amdgpu_fence_driver {
-	uint64_t			gpu_addr;
-	volatile uint32_t		*cpu_addr;
-	/* sync_seq is protected by ring emission lock */
-	uint32_t			sync_seq;
-	atomic_t			last_seq;
-	bool				initialized;
-	struct amdgpu_irq_src		*irq_src;
-	unsigned			irq_type;
-	struct timer_list		fallback_timer;
-	unsigned			num_fences_mask;
-	spinlock_t			lock;
-	struct fence			**fences;
-};
-
-/* some special values for the owner field */
-#define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
-#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
-
-#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
-#define AMDGPU_FENCE_FLAG_INT           (1 << 1)
-
-int amdgpu_fence_driver_init(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
-				  unsigned num_hw_submission);
-int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
-				   struct amdgpu_irq_src *irq_src,
-				   unsigned irq_type);
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
-void amdgpu_fence_process(struct amdgpu_ring *ring);
-int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
-unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-
 /*
  * BO.
  */
@@ -464,7 +396,6 @@ struct amdgpu_bo {
 	 */
 	struct list_head		va;
 	/* Constant after initialization */
-	struct amdgpu_device		*adev;
 	struct drm_gem_object		gem_base;
 	struct amdgpu_bo		*parent;
 	struct amdgpu_bo		*shadow;
@@ -561,27 +492,6 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 			  struct drm_device *dev,
 			  uint32_t handle, uint64_t *offset_p);
-/*
- * Synchronization
- */
-struct amdgpu_sync {
-	DECLARE_HASHTABLE(fences, 4);
-	struct fence	        *last_vm_update;
-};
-
-void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct fence *f);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
-		     struct amdgpu_sync *sync,
-		     struct reservation_object *resv,
-		     void *owner);
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
-				     struct amdgpu_ring *ring);
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-void amdgpu_sync_free(struct amdgpu_sync *sync);
-int amdgpu_sync_init(void);
-void amdgpu_sync_fini(void);
 int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
@@ -723,14 +633,6 @@ struct amdgpu_ib {
 	uint32_t			flags;
 };
 
-enum amdgpu_ring_type {
-	AMDGPU_RING_TYPE_GFX,
-	AMDGPU_RING_TYPE_COMPUTE,
-	AMDGPU_RING_TYPE_SDMA,
-	AMDGPU_RING_TYPE_UVD,
-	AMDGPU_RING_TYPE_VCE
-};
-
 extern const struct amd_sched_backend_ops amdgpu_sched_ops;
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -744,213 +646,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
 		      struct fence **f);
 
-struct amdgpu_ring {
-	struct amdgpu_device		*adev;
-	const struct amdgpu_ring_funcs	*funcs;
-	struct amdgpu_fence_driver	fence_drv;
-	struct amd_gpu_scheduler	sched;
-
-	struct amdgpu_bo	*ring_obj;
-	volatile uint32_t	*ring;
-	unsigned		rptr_offs;
-	unsigned		wptr;
-	unsigned		wptr_old;
-	unsigned		ring_size;
-	unsigned		max_dw;
-	int			count_dw;
-	uint64_t		gpu_addr;
-	uint32_t		align_mask;
-	uint32_t		ptr_mask;
-	bool			ready;
-	u32			nop;
-	u32			idx;
-	u32			me;
-	u32			pipe;
-	u32			queue;
-	struct amdgpu_bo	*mqd_obj;
-	u32			doorbell_index;
-	bool			use_doorbell;
-	unsigned		wptr_offs;
-	unsigned		fence_offs;
-	uint64_t		current_ctx;
-	enum amdgpu_ring_type	type;
-	char			name[16];
-	unsigned		cond_exe_offs;
-	u64			cond_exe_gpu_addr;
-	volatile u32		*cond_exe_cpu_addr;
-#if defined(CONFIG_DEBUG_FS)
-	struct dentry *ent;
-#endif
-};
-
-/*
- * VM
- */
-
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM	16
-
-/* Maximum number of PTEs the hardware can write with one command */
-#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF
-
-/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
-
-/* PTBs (Page Table Blocks) need to be aligned to 32K */
-#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
-
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
-#define AMDGPU_PTE_VALID	(1 << 0)
-#define AMDGPU_PTE_SYSTEM	(1 << 1)
-#define AMDGPU_PTE_SNOOPED	(1 << 2)
-
-/* VI only */
-#define AMDGPU_PTE_EXECUTABLE	(1 << 4)
-
-#define AMDGPU_PTE_READABLE	(1 << 5)
-#define AMDGPU_PTE_WRITEABLE	(1 << 6)
-
-#define AMDGPU_PTE_FRAG(x)	((x & 0x1f) << 7)
-
-/* How to programm VM fault handling */
-#define AMDGPU_VM_FAULT_STOP_NEVER	0
-#define AMDGPU_VM_FAULT_STOP_FIRST	1
-#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
-
-struct amdgpu_vm_pt {
-	struct amdgpu_bo_list_entry	entry;
-	uint64_t			addr;
-	uint64_t			shadow_addr;
-};
-
-struct amdgpu_vm {
-	/* tree of virtual addresses mapped */
-	struct rb_root		va;
-
-	/* protecting invalidated */
-	spinlock_t		status_lock;
-
-	/* BOs moved, but not yet updated in the PT */
-	struct list_head	invalidated;
-
-	/* BOs cleared in the PT because of a move */
-	struct list_head	cleared;
-
-	/* BO mappings freed, but not yet updated in the PT */
-	struct list_head	freed;
-
-	/* contains the page directory */
-	struct amdgpu_bo	*page_directory;
-	unsigned		max_pde_used;
-	struct fence		*page_directory_fence;
-	uint64_t		last_eviction_counter;
-
-	/* array of page tables, one for each page directory entry */
-	struct amdgpu_vm_pt	*page_tables;
-
-	/* for id and flush management per ring */
-	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];
-
-	/* protecting freed */
-	spinlock_t		freed_lock;
-
-	/* Scheduler entity for page table updates */
-	struct amd_sched_entity	entity;
-
-	/* client id */
-	u64                     client_id;
-};
-
-struct amdgpu_vm_id {
-	struct list_head	list;
-	struct fence		*first;
-	struct amdgpu_sync	active;
-	struct fence		*last_flush;
-	atomic64_t		owner;
-
-	uint64_t		pd_gpu_addr;
-	/* last flushed PD/PT update */
-	struct fence		*flushed_updates;
-
-	uint32_t                current_gpu_reset_count;
-
-	uint32_t		gds_base;
-	uint32_t		gds_size;
-	uint32_t		gws_base;
-	uint32_t		gws_size;
-	uint32_t		oa_base;
-	uint32_t		oa_size;
-};
-
-struct amdgpu_vm_manager {
-	/* Handling of VMIDs */
-	struct mutex				lock;
-	unsigned				num_ids;
-	struct list_head			ids_lru;
-	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];
-
-	/* Handling of VM fences */
-	u64					fence_context;
-	unsigned				seqno[AMDGPU_MAX_RINGS];
-
-	uint32_t				max_pfn;
-	/* vram base address for page table entry  */
-	u64					vram_base_offset;
-	/* is vm enabled? */
-	bool					enabled;
-	/* vm pte handling */
-	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
-	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
-	unsigned				vm_pte_num_rings;
-	atomic_t				vm_pte_next_ring;
-	/* client id counter */
-	atomic64_t				client_counter;
-};
-
-void amdgpu_vm_manager_init(struct amdgpu_device *adev);
-void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
-			 struct list_head *validated,
-			 struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct list_head *duplicates);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
-				  struct amdgpu_vm *vm);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence,
-		      struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			     struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
-			struct amdgpu_bo_va *bo_va,
-			bool clear);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
-				       struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
-				      struct amdgpu_vm *vm,
-				      struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
-		     struct amdgpu_bo_va *bo_va,
-		     uint64_t addr, uint64_t offset,
-		     uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
-		       struct amdgpu_bo_va *bo_va,
-		       uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
-		      struct amdgpu_bo_va *bo_va);
-
 /*
  * context related structures
  */
@@ -1093,6 +788,16 @@ struct amdgpu_scratch {
 /*
  * GFX configurations
  */
+#define AMDGPU_GFX_MAX_SE 4
+#define AMDGPU_GFX_MAX_SH_PER_SE 2
+
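+/* per-SE/SH render backend (RB) raster configuration */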
+struct amdgpu_rb_config {
+	uint32_t rb_backend_disable;
+	uint32_t user_rb_backend_disable;
+	uint32_t raster_config;
+	uint32_t raster_config_1;
+};
+
 struct amdgpu_gca_config {
 	unsigned max_shader_engines;
 	unsigned max_tile_pipes;
@@ -1121,6 +826,8 @@ struct amdgpu_gca_config {
 
 	uint32_t tile_mode_array[32];
 	uint32_t macrotile_mode_array[16];
+
+	struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
 };
 
 struct amdgpu_cu_info {
@@ -1133,6 +840,7 @@ struct amdgpu_gfx_funcs {
 	/* get the gpu clock counter */
 	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
 	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
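+	/* read the registers of one wave (by simd/wave index) into dst */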
+	void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd,
+			       uint32_t wave, uint32_t *dst, int *no_fields);
 };
 
 struct amdgpu_gfx {
@@ -1188,16 +896,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
-void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
-void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
-void amdgpu_ring_commit(struct amdgpu_ring *ring);
-void amdgpu_ring_undo(struct amdgpu_ring *ring);
-int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-		     unsigned ring_size, u32 nop, u32 align_mask,
-		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
-		     enum amdgpu_ring_type ring_type);
-void amdgpu_ring_fini(struct amdgpu_ring *ring);
 
 /*
  * CS.
@@ -1294,354 +992,6 @@ struct amdgpu_wb {
 int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
 void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
 
-
-
-enum amdgpu_int_thermal_type {
-	THERMAL_TYPE_NONE,
-	THERMAL_TYPE_EXTERNAL,
-	THERMAL_TYPE_EXTERNAL_GPIO,
-	THERMAL_TYPE_RV6XX,
-	THERMAL_TYPE_RV770,
-	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
-	THERMAL_TYPE_EVERGREEN,
-	THERMAL_TYPE_SUMO,
-	THERMAL_TYPE_NI,
-	THERMAL_TYPE_SI,
-	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
-	THERMAL_TYPE_CI,
-	THERMAL_TYPE_KV,
-};
-
-enum amdgpu_dpm_auto_throttle_src {
-	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
-	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
-	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
-	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
-	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
-	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
-	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
-#define AMDGPU_MAX_VCE_LEVELS 6
-
-enum amdgpu_vce_level {
-	AMDGPU_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
-	AMDGPU_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
-	AMDGPU_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
-	AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
-	AMDGPU_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
-	AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-struct amdgpu_ps {
-	u32 caps; /* vbios flags */
-	u32 class; /* vbios flags */
-	u32 class2; /* vbios flags */
-	/* UVD clocks */
-	u32 vclk;
-	u32 dclk;
-	/* VCE clocks */
-	u32 evclk;
-	u32 ecclk;
-	bool vce_active;
-	enum amdgpu_vce_level vce_level;
-	/* asic priv */
-	void *ps_priv;
-};
-
-struct amdgpu_dpm_thermal {
-	/* thermal interrupt work */
-	struct work_struct work;
-	/* low temperature threshold */
-	int                min_temp;
-	/* high temperature threshold */
-	int                max_temp;
-	/* was last interrupt low to high or high to low */
-	bool               high_to_low;
-	/* interrupt source */
-	struct amdgpu_irq_src	irq;
-};
-
-enum amdgpu_clk_action
-{
-	AMDGPU_SCLK_UP = 1,
-	AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
-	u32 sclk;
-	u32 mclk;
-	enum amdgpu_clk_action action;
-};
-
-struct amdgpu_clock_and_voltage_limits {
-	u32 sclk;
-	u32 mclk;
-	u16 vddc;
-	u16 vddci;
-};
-
-struct amdgpu_clock_array {
-	u32 count;
-	u32 *values;
-};
-
-struct amdgpu_clock_voltage_dependency_entry {
-	u32 clk;
-	u16 v;
-};
-
-struct amdgpu_clock_voltage_dependency_table {
-	u32 count;
-	struct amdgpu_clock_voltage_dependency_entry *entries;
-};
-
-union amdgpu_cac_leakage_entry {
-	struct {
-		u16 vddc;
-		u32 leakage;
-	};
-	struct {
-		u16 vddc1;
-		u16 vddc2;
-		u16 vddc3;
-	};
-};
-
-struct amdgpu_cac_leakage_table {
-	u32 count;
-	union amdgpu_cac_leakage_entry *entries;
-};
-
-struct amdgpu_phase_shedding_limits_entry {
-	u16 voltage;
-	u32 sclk;
-	u32 mclk;
-};
-
-struct amdgpu_phase_shedding_limits_table {
-	u32 count;
-	struct amdgpu_phase_shedding_limits_entry *entries;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_entry {
-	u32 vclk;
-	u32 dclk;
-	u16 v;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_table {
-	u8 count;
-	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_entry {
-	u32 ecclk;
-	u32 evclk;
-	u16 v;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_table {
-	u8 count;
-	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_ppm_table {
-	u8 ppm_design;
-	u16 cpu_core_number;
-	u32 platform_tdp;
-	u32 small_ac_platform_tdp;
-	u32 platform_tdc;
-	u32 small_ac_platform_tdc;
-	u32 apu_tdp;
-	u32 dgpu_tdp;
-	u32 dgpu_ulv_power;
-	u32 tj_max;
-};
-
-struct amdgpu_cac_tdp_table {
-	u16 tdp;
-	u16 configurable_tdp;
-	u16 tdc;
-	u16 battery_power_limit;
-	u16 small_power_limit;
-	u16 low_cac_leakage;
-	u16 high_cac_leakage;
-	u16 maximum_power_delivery_limit;
-};
-
-struct amdgpu_dpm_dynamic_state {
-	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
-	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
-	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
-	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
-	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
-	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
-	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
-	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
-	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
-	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
-	struct amdgpu_clock_array valid_sclk_values;
-	struct amdgpu_clock_array valid_mclk_values;
-	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
-	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
-	u32 mclk_sclk_ratio;
-	u32 sclk_mclk_delta;
-	u16 vddc_vddci_delta;
-	u16 min_vddc_for_pcie_gen2;
-	struct amdgpu_cac_leakage_table cac_leakage_table;
-	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
-	struct amdgpu_ppm_table *ppm_table;
-	struct amdgpu_cac_tdp_table *cac_tdp_table;
-};
-
-struct amdgpu_dpm_fan {
-	u16 t_min;
-	u16 t_med;
-	u16 t_high;
-	u16 pwm_min;
-	u16 pwm_med;
-	u16 pwm_high;
-	u8 t_hyst;
-	u32 cycle_delay;
-	u16 t_max;
-	u8 control_mode;
-	u16 default_max_fan_pwm;
-	u16 default_fan_output_sensitivity;
-	u16 fan_output_sensitivity;
-	bool ucode_fan_control;
-};
-
-enum amdgpu_pcie_gen {
-	AMDGPU_PCIE_GEN1 = 0,
-	AMDGPU_PCIE_GEN2 = 1,
-	AMDGPU_PCIE_GEN3 = 2,
-	AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-enum amdgpu_dpm_forced_level {
-	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
-	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
-	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
-	AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
-};
-
-struct amdgpu_vce_state {
-	/* vce clocks */
-	u32 evclk;
-	u32 ecclk;
-	/* gpu clocks */
-	u32 sclk;
-	u32 mclk;
-	u8 clk_idx;
-	u8 pstate;
-};
-
-struct amdgpu_dpm_funcs {
-	int (*get_temperature)(struct amdgpu_device *adev);
-	int (*pre_set_power_state)(struct amdgpu_device *adev);
-	int (*set_power_state)(struct amdgpu_device *adev);
-	void (*post_set_power_state)(struct amdgpu_device *adev);
-	void (*display_configuration_changed)(struct amdgpu_device *adev);
-	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
-	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
-	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
-	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
-	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
-	bool (*vblank_too_short)(struct amdgpu_device *adev);
-	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
-	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
-	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
-	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
-	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
-	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
-	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
-	int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
-	int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
-	int (*get_sclk_od)(struct amdgpu_device *adev);
-	int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
-	int (*get_mclk_od)(struct amdgpu_device *adev);
-	int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
-};
-
-struct amdgpu_dpm {
-	struct amdgpu_ps        *ps;
-	/* number of valid power states */
-	int                     num_ps;
-	/* current power state that is active */
-	struct amdgpu_ps        *current_ps;
-	/* requested power state */
-	struct amdgpu_ps        *requested_ps;
-	/* boot up power state */
-	struct amdgpu_ps        *boot_ps;
-	/* default uvd power state */
-	struct amdgpu_ps        *uvd_ps;
-	/* vce requirements */
-	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
-	enum amdgpu_vce_level vce_level;
-	enum amd_pm_state_type state;
-	enum amd_pm_state_type user_state;
-	u32                     platform_caps;
-	u32                     voltage_response_time;
-	u32                     backbias_response_time;
-	void                    *priv;
-	u32			new_active_crtcs;
-	int			new_active_crtc_count;
-	u32			current_active_crtcs;
-	int			current_active_crtc_count;
-	struct amdgpu_dpm_dynamic_state dyn_state;
-	struct amdgpu_dpm_fan fan;
-	u32 tdp_limit;
-	u32 near_tdp_limit;
-	u32 near_tdp_limit_adjusted;
-	u32 sq_ramping_threshold;
-	u32 cac_leakage;
-	u16 tdp_od_limit;
-	u32 tdp_adjustment;
-	u16 load_line_slope;
-	bool power_control;
-	bool ac_power;
-	/* special states active */
-	bool                    thermal_active;
-	bool                    uvd_active;
-	bool                    vce_active;
-	/* thermal handling */
-	struct amdgpu_dpm_thermal thermal;
-	/* forced levels */
-	enum amdgpu_dpm_forced_level forced_level;
-};
-
-struct amdgpu_pm {
-	struct mutex		mutex;
-	u32                     current_sclk;
-	u32                     current_mclk;
-	u32                     default_sclk;
-	u32                     default_mclk;
-	struct amdgpu_i2c_chan *i2c_bus;
-	/* internal thermal controller on rv6xx+ */
-	enum amdgpu_int_thermal_type int_thermal_type;
-	struct device	        *int_hwmon_dev;
-	/* fan control parameters */
-	bool                    no_fan;
-	u8                      fan_pulses_per_revolution;
-	u8                      fan_min_rpm;
-	u8                      fan_max_rpm;
-	/* dpm */
-	bool                    dpm_enabled;
-	bool                    sysfs_initialized;
-	struct amdgpu_dpm       dpm;
-	const struct firmware	*fw;	/* SMC firmware */
-	uint32_t                fw_version;
-	const struct amdgpu_dpm_funcs *funcs;
-	uint32_t                pcie_gen_mask;
-	uint32_t                pcie_mlw_mask;
-	struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
-};
-
 void amdgpu_get_pcie_info(struct amdgpu_device *adev);
 
 /*
@@ -1939,14 +1289,6 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
-struct amdgpu_ip_block_status {
-	bool valid;
-	bool sw;
-	bool hw;
-	bool late_initialized;
-	bool hang;
-};
-
 struct amdgpu_device {
 	struct device			*dev;
 	struct drm_device		*ddev;
@@ -2102,9 +1444,8 @@ struct amdgpu_device {
 	/* GDS */
 	struct amdgpu_gds		gds;
 
-	const struct amdgpu_ip_block_version *ip_blocks;
+	struct amdgpu_ip_block          ip_blocks[AMDGPU_MAX_IP_NUM];
 	int				num_ip_blocks;
-	struct amdgpu_ip_block_status	*ip_block_status;
 	struct mutex	mn_lock;
 	DECLARE_HASHTABLE(mn_hash, 7);
 
@@ -2127,6 +1468,11 @@ struct amdgpu_device {
 
 };
 
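+/* resolve the amdgpu_device that embeds a given TTM BO device */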
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+{
+	return container_of(bdev, struct amdgpu_device, mman.bdev);
+}
+
 bool amdgpu_device_is_px(struct drm_device *dev);
 int amdgpu_device_init(struct amdgpu_device *adev,
 		       struct drm_device *ddev,
@@ -2278,8 +1624,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
-#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
-#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2301,108 +1645,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
 #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib),  (s), (d), (b))
 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
 #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
-
-#define amdgpu_dpm_read_sensor(adev, idx, value) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
-		-EINVAL)
-
-#define amdgpu_dpm_get_temperature(adev) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
-	      (adev)->pm.funcs->get_temperature((adev)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
-	      (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
-	      (adev)->pm.funcs->get_fan_control_mode((adev)))
-
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-	      (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-	      (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_sclk(adev, l) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
-		(adev)->pm.funcs->get_sclk((adev), (l)))
-
-#define amdgpu_dpm_get_mclk(adev, l)  \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
-	      (adev)->pm.funcs->get_mclk((adev), (l)))
-
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
-	      (adev)->pm.funcs->force_performance_level((adev), (l)))
-
-#define amdgpu_dpm_powergate_uvd(adev, g) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
-	      (adev)->pm.funcs->powergate_uvd((adev), (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
-	((adev)->pp_enabled ?						\
-	      (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
-	      (adev)->pm.funcs->powergate_vce((adev), (g)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
-	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_performance_level(adev) \
-	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
-	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
-	(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
-	(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
-	(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
-		(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
-
-#define amdgpu_dpm_get_sclk_od(adev) \
-	(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
-	(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
-
-#define amdgpu_dpm_get_mclk_od(adev) \
-	((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
-	((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output)		\
-	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
-
 #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
 
 /* Common functions */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 892d60fb225b56b25d7a44edec959ab56f45ed90..2f9f96cc9f65bad601cd11b970b976d170146d95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -265,14 +265,14 @@ static int acp_hw_init(void *handle)
 
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	const struct amdgpu_ip_block_version *ip_version =
+	const struct amdgpu_ip_block *ip_block =
 		amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
 
-	if (!ip_version)
+	if (!ip_block)
 		return -EINVAL;
 
 	r = amd_acp_hw_init(adev->acp.cgs_device,
-			    ip_version->major, ip_version->minor);
+			    ip_block->version->major, ip_block->version->minor);
 	/* -ENODEV means board uses AZ rather than ACP */
 	if (r == -ENODEV)
 		return 0;
@@ -456,7 +456,7 @@ static int acp_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs acp_ip_funcs = {
+static const struct amd_ip_funcs acp_ip_funcs = {
 	.name = "acp_ip",
 	.early_init = acp_early_init,
 	.late_init = NULL,
@@ -472,3 +472,12 @@ const struct amd_ip_funcs acp_ip_funcs = {
 	.set_clockgating_state = acp_set_clockgating_state,
 	.set_powergating_state = acp_set_powergating_state,
 };
+
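+/* IP block descriptor for the ACP (audio coprocessor) block, v2.2 */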
+const struct amdgpu_ip_block_version acp_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_ACP,
+	.major = 2,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &acp_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
index 8a396313c86f8fc7b8a2bc2c7617a167f401e201..a288ce25c176528a792bb1b751453a27bddc486b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
@@ -37,6 +37,6 @@ struct amdgpu_acp {
 	struct acp_pm_domain *acp_genpd;
 };
 
-extern const struct amd_ip_funcs acp_ip_funcs;
+extern const struct amdgpu_ip_block_version acp_ip_block;
 
 #endif /* __AMDGPU_ACP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 8e6bf548d68907f871952f0109186a03373cdd5f..56a86dd5789e967abb9f13a0a7f78fb3f519b2ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1115,49 +1115,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 	return 0;
 }
 
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
-{
-	GET_ENGINE_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-	return le32_to_cpu(args.ulReturnEngineClock);
-}
-
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
-{
-	GET_MEMORY_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-	return le32_to_cpu(args.ulReturnMemoryClock);
-}
-
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
-				      uint32_t eng_clock)
-{
-	SET_ENGINE_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
-
-	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 khz */
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
-				      uint32_t mem_clock)
-{
-	SET_MEMORY_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
-
-	if (adev->flags & AMD_IS_APU)
-		return;
-
-	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
 void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
 					     u32 eng_clock, u32 mem_clock)
 {
@@ -1256,45 +1213,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
 	return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
 }
 
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
-				 u16 voltage_level,
-				 u8 voltage_type)
-{
-	union set_voltage args;
-	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
-	u8 frev, crev, volt_index = voltage_level;
-
-	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
-		return;
-
-	/* 0xff01 is a flag rather then an actual voltage */
-	if (voltage_level == 0xff01)
-		return;
-
-	switch (crev) {
-	case 1:
-		args.v1.ucVoltageType = voltage_type;
-		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
-		args.v1.ucVoltageIndex = volt_index;
-		break;
-	case 2:
-		args.v2.ucVoltageType = voltage_type;
-		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
-		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
-		break;
-	case 3:
-		args.v3.ucVoltageType = voltage_type;
-		args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
-		args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
-		break;
-	default:
-		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
-		return;
-	}
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
 int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
 					      u16 *leakage_id)
 {
@@ -1784,6 +1702,19 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
 		WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
 }
 
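+/* set or clear the "GUI engine hung" flag in BIOS scratch register 3 */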
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+					      bool hung)
+{
+	u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+
+	if (hung)
+		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+	else
+		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+	WREG32(mmBIOS_SCRATCH_3, tmp);
+}
+
 /* Atom needs data in little endian format
  * so swap as appropriate when copying data to
  * or from atom. Note that atom operates on
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 17356151db38a9ca91ed25cf1a3450c91352ba7f..70e9acef5d9c67ac228e173cd1beece437b5e82d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -163,16 +163,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 					    bool strobe_mode,
 					    struct atom_mpll_param *mpll_param);
 
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev);
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev);
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
-				      uint32_t eng_clock);
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
-				      uint32_t mem_clock);
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
-				 u16 voltage_level,
-				 u8 voltage_type);
-
 void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
 					     u32 eng_clock, u32 mem_clock);
 
@@ -206,6 +196,8 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
 void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+					      bool hung);
 
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
 int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a8bfa34682fdd8a92959d43328a7de058b42b7f..017556ca22e6c23cb26a5c66a78e78802eb6a300 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -146,7 +146,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 	switch(type) {
 	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
 	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
-		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 		domain = AMDGPU_GEM_DOMAIN_VRAM;
 		if (max_offset > adev->mc.real_vram_size)
 			return -EINVAL;
@@ -157,7 +158,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 		break;
 	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
 	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
-		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 		domain = AMDGPU_GEM_DOMAIN_VRAM;
 		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
 			place.fpfn =
@@ -240,7 +242,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
 	r = amdgpu_bo_reserve(obj, false);
 	if (unlikely(r != 0))
 		return r;
-	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+	r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
 				     min_offset, max_offset, mcaddr);
 	amdgpu_bo_unreserve(obj);
 	return r;
@@ -624,11 +626,11 @@ static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
 	int i, r = -1;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
 
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->set_clockgating_state(
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
 								(void *)adev,
 									state);
 			break;
@@ -645,11 +647,11 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
 	int i, r = -1;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
 
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->set_powergating_state(
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
 								(void *)adev,
 									state);
 			break;
@@ -685,15 +687,21 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 		result = AMDGPU_UCODE_ID_CP_MEC1;
 		break;
 	case CGS_UCODE_ID_CP_MEC_JT2:
-		if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
-		  || adev->asic_type == CHIP_POLARIS10)
-			result = AMDGPU_UCODE_ID_CP_MEC2;
-		else
+		/* For VI: JT2 should be the same as JT1, because:
+		 * 1. MEC2 and MEC1 use exactly the same FW.
+		 * 2. JT2 is not patched, but JT1 is.
+		 */
+		if (adev->asic_type >= CHIP_TOPAZ)
 			result = AMDGPU_UCODE_ID_CP_MEC1;
+		else
+			result = AMDGPU_UCODE_ID_CP_MEC2;
 		break;
 	case CGS_UCODE_ID_RLC_G:
 		result = AMDGPU_UCODE_ID_RLC_G;
 		break;
+	case CGS_UCODE_ID_STORAGE:
+		result = AMDGPU_UCODE_ID_STORAGE;
+		break;
 	default:
 		DRM_ERROR("Firmware type not supported\n");
 	}
@@ -776,12 +784,18 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 
 		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
 		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
-			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
 			data_size = le32_to_cpu(header->jt_size) << 2;
 		}
-		info->mc_addr = gpu_addr;
+
+		info->kptr = ucode->kaddr;
 		info->image_size = data_size;
+		info->mc_addr = gpu_addr;
 		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+
+		if (type == CGS_UCODE_ID_CP_MEC)
+			info->image_size = le32_to_cpu(header->jt_offset) << 2;
+
 		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
 		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
 	} else {
@@ -851,6 +865,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 	return 0;
 }
 
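+/* CGS callback: report whether the device is an SR-IOV virtual function */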
+static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
+{
+	CGS_FUNC_ADEV;
+	return amdgpu_sriov_vf(adev);
+}
+
 static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
 					struct cgs_system_info *sys_info)
 {
@@ -1204,6 +1224,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 	amdgpu_cgs_notify_dpm_enabled,
 	amdgpu_cgs_call_acpi_method,
 	amdgpu_cgs_query_system_info,
+	amdgpu_cgs_is_virtualization_enabled,
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index e3281d4e3e414cc9d693932911215c6fc7216d6f..3af8ffb45b64d9d26ab885ef3f7eb5f354462ba5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1517,88 +1517,6 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
 	.force = amdgpu_connector_dvi_force,
 };
 
-static struct drm_encoder *
-amdgpu_connector_virtual_encoder(struct drm_connector *connector)
-{
-	int enc_id = connector->encoder_ids[0];
-	struct drm_encoder *encoder;
-	int i;
-	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-		if (connector->encoder_ids[i] == 0)
-			break;
-
-		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
-		if (!encoder)
-			continue;
-
-		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
-			return encoder;
-	}
-
-	/* pick the first one */
-	if (enc_id)
-		return drm_encoder_find(connector->dev, enc_id);
-	return NULL;
-}
-
-static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
-{
-	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
-
-	if (encoder) {
-		amdgpu_connector_add_common_modes(encoder, connector);
-	}
-
-	return 0;
-}
-
-static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
-					   struct drm_display_mode *mode)
-{
-	return MODE_OK;
-}
-
-static int
-amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
-{
-	return 0;
-}
-
-static enum drm_connector_status
-
-amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
-{
-	return connector_status_connected;
-}
-
-static int
-amdgpu_connector_virtual_set_property(struct drm_connector *connector,
-				  struct drm_property *property,
-				  uint64_t val)
-{
-	return 0;
-}
-
-static void amdgpu_connector_virtual_force(struct drm_connector *connector)
-{
-	return;
-}
-
-static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
-	.get_modes = amdgpu_connector_virtual_get_modes,
-	.mode_valid = amdgpu_connector_virtual_mode_valid,
-	.best_encoder = amdgpu_connector_virtual_encoder,
-};
-
-static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
-	.dpms = amdgpu_connector_virtual_dpms,
-	.detect = amdgpu_connector_virtual_detect,
-	.fill_modes = drm_helper_probe_single_connector_modes,
-	.set_property = amdgpu_connector_virtual_set_property,
-	.destroy = amdgpu_connector_destroy,
-	.force = amdgpu_connector_virtual_force,
-};
-
 void
 amdgpu_connector_add(struct amdgpu_device *adev,
 		      uint32_t connector_id,
@@ -1983,17 +1901,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 			connector->interlace_allowed = false;
 			connector->doublescan_allowed = false;
 			break;
-		case DRM_MODE_CONNECTOR_VIRTUAL:
-			amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
-			if (!amdgpu_dig_connector)
-				goto failed;
-			amdgpu_connector->con_priv = amdgpu_dig_connector;
-			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
-			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
-			subpixel_order = SubPixelHorizontalRGB;
-			connector->interlace_allowed = false;
-			connector->doublescan_allowed = false;
-			break;
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b0f6e6957536a7827acc979856f3ef5280b3ce79..cf03f9f01f4027c3571213600acef4edc5694326 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
 static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 				 struct amdgpu_bo *bo)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	u64 initial_bytes_moved;
 	uint32_t domain;
 	int r;
@@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 
 retry:
 	amdgpu_ttm_placement_from_domain(bo, domain);
-	initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-	p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+	p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
 		initial_bytes_moved;
 
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -387,9 +388,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 
 /* Last resort, try to evict something from the current working set */
 static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
-				struct amdgpu_bo_list_entry *lobj)
+				struct amdgpu_bo *validated)
 {
-	uint32_t domain = lobj->robj->allowed_domains;
+	uint32_t domain = validated->allowed_domains;
 	int r;
 
 	if (!p->evictable)
@@ -400,11 +401,12 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 
 		struct amdgpu_bo_list_entry *candidate = p->evictable;
 		struct amdgpu_bo *bo = candidate->robj;
+		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 		u64 initial_bytes_moved;
 		uint32_t other;
 
 		/* If we reached our current BO we can forget it */
-		if (candidate == lobj)
+		if (candidate->robj == validated)
 			break;
 
 		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
@@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 
 		/* Good we can try to move this BO somewhere else */
 		amdgpu_ttm_placement_from_domain(bo, other);
-		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-		p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+		p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
 			initial_bytes_moved;
 
 		if (unlikely(r))
@@ -437,6 +439,23 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 	return false;
 }
 
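+/* validate a BO for CS, evicting on -ENOMEM; also validates the shadow BO */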
+static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
+{
+	struct amdgpu_cs_parser *p = param;
+	int r;
+
+	do {
+		r = amdgpu_cs_bo_validate(p, bo);
+	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+	if (r)
+		return r;
+
+	if (bo->shadow)
+		r = amdgpu_cs_bo_validate(p, bo);
+
+	return r;
+}
+
 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 			    struct list_head *validated)
 {
@@ -464,18 +483,10 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 		if (p->evictable == lobj)
 			p->evictable = NULL;
 
-		do {
-			r = amdgpu_cs_bo_validate(p, bo);
-		} while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+		r = amdgpu_cs_validate(p, bo);
 		if (r)
 			return r;
 
-		if (bo->shadow) {
-			r = amdgpu_cs_bo_validate(p, bo);
-			if (r)
-				return r;
-		}
-
 		if (binding_userptr) {
 			drm_free_large(lobj->user_pages);
 			lobj->user_pages = NULL;
@@ -593,14 +604,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		list_splice(&need_pages, &p->validated);
 	}
 
-	amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
-
 	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
 	p->bytes_moved = 0;
 	p->evictable = list_last_entry(&p->validated,
 				       struct amdgpu_bo_list_entry,
 				       tv.head);
 
+	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
+				      amdgpu_cs_validate, p);
+	if (r) {
+		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
+		goto error_validate;
+	}
+
 	r = amdgpu_cs_list_validate(p, &duplicates);
 	if (r) {
 		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
@@ -806,13 +822,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 
 	/* Only for UVD/VCE VM emulation */
 	if (ring->funcs->parse_cs) {
-		p->job->vm = NULL;
 		for (i = 0; i < p->job->num_ibs; i++) {
 			r = amdgpu_ring_parse_cs(ring, p, i);
 			if (r)
 				return r;
 		}
-	} else {
+	}
+
+	if (p->job->vm) {
 		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 
 		r = amdgpu_bo_vm_update_pte(p, vm);
@@ -901,7 +918,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
 			kptr += chunk_ib->va_start - offset;
 
-			r =  amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
+			r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
 			if (r) {
 				DRM_ERROR("Failed to get ib !\n");
 				return r;
@@ -916,9 +933,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 				return r;
 			}
 
-			ib->gpu_addr = chunk_ib->va_start;
 		}
 
+		ib->gpu_addr = chunk_ib->va_start;
 		ib->length_dw = chunk_ib->ib_bytes / 4;
 		ib->flags = chunk_ib->flags;
 		j++;
@@ -926,8 +943,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 
 	/* UVD & VCE fw doesn't support user fences */
 	if (parser->job->uf_addr && (
-	    parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
-	    parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
+	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
 		return -EINVAL;
 
 	return 0;
@@ -1195,6 +1212,15 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
 		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
 		if (unlikely(r))
 			return r;
+
+		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+			continue;
+
+		bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+		amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+		if (unlikely(r))
+			return r;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a5e2fcbef0f0f24f54bcf54164eb610c463edd49..6d86eaef934c4705ffad0b80172a7db47a64d94d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -55,18 +55,18 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
 					  rq, amdgpu_sched_jobs);
 		if (r)
-			break;
+			goto failed;
 	}
 
-	if (i < adev->num_rings) {
-		for (j = 0; j < i; j++)
-			amd_sched_entity_fini(&adev->rings[j]->sched,
-					      &ctx->rings[j].entity);
-		kfree(ctx->fences);
-		ctx->fences = NULL;
-		return r;
-	}
 	return 0;
+
+failed:
+	for (j = 0; j < i; j++)
+		amd_sched_entity_fini(&adev->rings[j]->sched,
+				      &ctx->rings[j].entity);
+	kfree(ctx->fences);
+	ctx->fences = NULL;
+	return r;
 }
 
 static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b4f4a9239069d1a3f073cd89f1355b2e0dd7e985..3b9b58debabd7c0312b2c101030ef61025914fba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -264,7 +264,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
 	if (adev->vram_scratch.robj == NULL) {
 		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
 				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 				     NULL, NULL, &adev->vram_scratch.robj);
 		if (r) {
 			return r;
@@ -442,13 +443,9 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
 static void amdgpu_wb_fini(struct amdgpu_device *adev)
 {
 	if (adev->wb.wb_obj) {
-		if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
-			amdgpu_bo_kunmap(adev->wb.wb_obj);
-			amdgpu_bo_unpin(adev->wb.wb_obj);
-			amdgpu_bo_unreserve(adev->wb.wb_obj);
-		}
-		amdgpu_bo_unref(&adev->wb.wb_obj);
-		adev->wb.wb = NULL;
+		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
+				      &adev->wb.gpu_addr,
+				      (void **)&adev->wb.wb);
 		adev->wb.wb_obj = NULL;
 	}
 }
@@ -467,33 +464,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
 	int r;
 
 	if (adev->wb.wb_obj == NULL) {
-		r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0,  NULL, NULL,
-				     &adev->wb.wb_obj);
+		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
+					    (void **)&adev->wb.wb);
 		if (r) {
 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
 			return r;
 		}
-		r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
-		if (unlikely(r != 0)) {
-			amdgpu_wb_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
-				&adev->wb.gpu_addr);
-		if (r) {
-			amdgpu_bo_unreserve(adev->wb.wb_obj);
-			dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
-			amdgpu_wb_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
-		amdgpu_bo_unreserve(adev->wb.wb_obj);
-		if (r) {
-			dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
-			amdgpu_wb_fini(adev);
-			return r;
-		}
 
 		adev->wb.num_wb = AMDGPU_MAX_WB;
 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
@@ -1051,6 +1029,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
 			 amdgpu_vm_block_size);
 		amdgpu_vm_block_size = 9;
 	}
+
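+	/* -1 disables splitting, otherwise require a power of two >= 16 pages */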
+	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
+	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
+		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
+			 amdgpu_vram_page_split);
+		amdgpu_vram_page_split = 1024;
+	}
 }
 
 /**
@@ -1125,11 +1110,11 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-									    state);
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+										     state);
 			if (r)
 				return r;
 			break;
@@ -1145,11 +1130,11 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
-									    state);
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+										     state);
 			if (r)
 				return r;
 			break;
@@ -1164,10 +1149,10 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
 	int i, r;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
 			if (r)
 				return r;
 			break;
@@ -1183,23 +1168,22 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
 	int i;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].type == block_type)
-			return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+		if (adev->ip_blocks[i].version->type == block_type)
+			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
 	}
 	return true;
 
 }
 
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
-					struct amdgpu_device *adev,
-					enum amd_ip_block_type type)
+struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
+					    enum amd_ip_block_type type)
 {
 	int i;
 
 	for (i = 0; i < adev->num_ip_blocks; i++)
-		if (adev->ip_blocks[i].type == type)
+		if (adev->ip_blocks[i].version->type == type)
 			return &adev->ip_blocks[i];
 
 	return NULL;
@@ -1220,38 +1204,75 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
 				enum amd_ip_block_type type,
 				u32 major, u32 minor)
 {
-	const struct amdgpu_ip_block_version *ip_block;
-	ip_block = amdgpu_get_ip_block(adev, type);
+	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
 
-	if (ip_block && ((ip_block->major > major) ||
-			((ip_block->major == major) &&
-			(ip_block->minor >= minor))))
+	if (ip_block && ((ip_block->version->major > major) ||
+			((ip_block->version->major == major) &&
+			(ip_block->version->minor >= minor))))
 		return 0;
 
 	return 1;
 }
 
-static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+/**
+ * amdgpu_ip_block_add
+ *
+ * @adev: amdgpu_device pointer
+ * @ip_block_version: pointer to the IP to add
+ *
+ * Adds the IP block driver information to the collection of IPs
+ * on the asic.
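+ *
+ * Returns 0 on success or -EINVAL when @ip_block_version is NULL.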
+ */
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+			const struct amdgpu_ip_block_version *ip_block_version)
+{
+	if (!ip_block_version)
+		return -EINVAL;
+
+	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
+
+	return 0;
+}
+
+static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 {
 	adev->enable_virtual_display = false;
 
 	if (amdgpu_virtual_display) {
 		struct drm_device *ddev = adev->ddev;
 		const char *pci_address_name = pci_name(ddev->pdev);
-		char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
 
 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
 		pciaddstr_tmp = pciaddstr;
-		while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
+			pciaddname = strsep(&pciaddname_tmp, ",");
 			if (!strcmp(pci_address_name, pciaddname)) {
+				long num_crtc;
+				int res = -1;
+
 				adev->enable_virtual_display = true;
+
+				if (pciaddname_tmp)
+					res = kstrtol(pciaddname_tmp, 10,
+						      &num_crtc);
+
+				if (!res) {
+					if (num_crtc < 1)
+						num_crtc = 1;
+					if (num_crtc > 6)
+						num_crtc = 6;
+					adev->mode_info.num_crtc = num_crtc;
+				} else {
+					adev->mode_info.num_crtc = 1;
+				}
 				break;
 			}
 		}
 
-		DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
-				 amdgpu_virtual_display, pci_address_name,
-				 adev->enable_virtual_display);
+		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+			 amdgpu_virtual_display, pci_address_name,
+			 adev->enable_virtual_display, adev->mode_info.num_crtc);
 
 		kfree(pciaddstr);
 	}
@@ -1261,7 +1282,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 {
 	int i, r;
 
-	amdgpu_whether_enable_virtual_display(adev);
+	amdgpu_device_enable_virtual_display(adev);
 
 	switch (adev->asic_type) {
 	case CHIP_TOPAZ:
@@ -1313,33 +1334,24 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 		return -EINVAL;
 	}
 
-	adev->ip_block_status = kcalloc(adev->num_ip_blocks,
-					sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
-	if (adev->ip_block_status == NULL)
-		return -ENOMEM;
-
-	if (adev->ip_blocks == NULL) {
-		DRM_ERROR("No IP blocks found!\n");
-		return r;
-	}
-
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
 			DRM_ERROR("disabled ip block: %d\n", i);
-			adev->ip_block_status[i].valid = false;
+			adev->ip_blocks[i].status.valid = false;
 		} else {
-			if (adev->ip_blocks[i].funcs->early_init) {
-				r = adev->ip_blocks[i].funcs->early_init((void *)adev);
+			if (adev->ip_blocks[i].version->funcs->early_init) {
+				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
 				if (r == -ENOENT) {
-					adev->ip_block_status[i].valid = false;
+					adev->ip_blocks[i].status.valid = false;
 				} else if (r) {
-					DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+					DRM_ERROR("early_init of IP block <%s> failed %d\n",
+						  adev->ip_blocks[i].version->funcs->name, r);
 					return r;
 				} else {
-					adev->ip_block_status[i].valid = true;
+					adev->ip_blocks[i].status.valid = true;
 				}
 			} else {
-				adev->ip_block_status[i].valid = true;
+				adev->ip_blocks[i].status.valid = true;
 			}
 		}
 	}
@@ -1355,22 +1367,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
 	int i, r;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
 		if (r) {
-			DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
-		adev->ip_block_status[i].sw = true;
+		adev->ip_blocks[i].status.sw = true;
 		/* need to do gmc hw init early so we can allocate gpu mem */
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
 			r = amdgpu_vram_scratch_init(adev);
 			if (r) {
 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
 				return r;
 			}
-			r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
 			if (r) {
 				DRM_ERROR("hw_init %d failed %d\n", i, r);
 				return r;
@@ -1380,22 +1393,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
 				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
 				return r;
 			}
-			adev->ip_block_status[i].hw = true;
+			adev->ip_blocks[i].status.hw = true;
 		}
 	}
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].sw)
+		if (!adev->ip_blocks[i].status.sw)
 			continue;
 		/* gmc hw init is done early */
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
 			continue;
-		r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
 		if (r) {
-			DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
-		adev->ip_block_status[i].hw = true;
+		adev->ip_blocks[i].status.hw = true;
 	}
 
 	return 0;
@@ -1406,25 +1420,26 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
 	int i = 0, r;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].funcs->late_init) {
-			r = adev->ip_blocks[i].funcs->late_init((void *)adev);
+		if (adev->ip_blocks[i].version->funcs->late_init) {
+			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
 			if (r) {
-				DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+				DRM_ERROR("late_init of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
 				return r;
 			}
-			adev->ip_block_status[i].late_initialized = true;
+			adev->ip_blocks[i].status.late_initialized = true;
 		}
 		/* skip CG for VCE/UVD, it's handled specially */
-		if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
-		    adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
 			/* enable clockgating to save power */
-			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-									    AMD_CG_STATE_GATE);
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+										     AMD_CG_STATE_GATE);
 			if (r) {
 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].funcs->name, r);
+					  adev->ip_blocks[i].version->funcs->name, r);
 				return r;
 			}
 		}
@@ -1439,68 +1454,71 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 
 	/* need to disable SMC first */
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].hw)
+		if (!adev->ip_blocks[i].status.hw)
 			continue;
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
-			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-									    AMD_CG_STATE_UNGATE);
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+										     AMD_CG_STATE_UNGATE);
 			if (r) {
 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].funcs->name, r);
+					  adev->ip_blocks[i].version->funcs->name, r);
 				return r;
 			}
-			r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
 			/* XXX handle errors */
 			if (r) {
 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].funcs->name, r);
+					  adev->ip_blocks[i].version->funcs->name, r);
 			}
-			adev->ip_block_status[i].hw = false;
+			adev->ip_blocks[i].status.hw = false;
 			break;
 		}
 	}
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_status[i].hw)
+		if (!adev->ip_blocks[i].status.hw)
 			continue;
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
 			amdgpu_wb_fini(adev);
 			amdgpu_vram_scratch_fini(adev);
 		}
 		/* ungate blocks before hw fini so that we can shutdown the blocks safely */
-		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-								    AMD_CG_STATE_UNGATE);
+		r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+									     AMD_CG_STATE_UNGATE);
 		if (r) {
-			DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
-		r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
 		/* XXX handle errors */
 		if (r) {
-			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 		}
-		adev->ip_block_status[i].hw = false;
+		adev->ip_blocks[i].status.hw = false;
 	}
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_status[i].sw)
+		if (!adev->ip_blocks[i].status.sw)
 			continue;
-		r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
 		/* XXX handle errors */
 		if (r) {
-			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 		}
-		adev->ip_block_status[i].sw = false;
-		adev->ip_block_status[i].valid = false;
+		adev->ip_blocks[i].status.sw = false;
+		adev->ip_blocks[i].status.valid = false;
 	}
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_status[i].late_initialized)
+		if (!adev->ip_blocks[i].status.late_initialized)
 			continue;
-		if (adev->ip_blocks[i].funcs->late_fini)
-			adev->ip_blocks[i].funcs->late_fini((void *)adev);
-		adev->ip_block_status[i].late_initialized = false;
+		if (adev->ip_blocks[i].version->funcs->late_fini)
+			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+		adev->ip_blocks[i].status.late_initialized = false;
 	}
 
 	return 0;
@@ -1518,21 +1536,23 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
 	}
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
 		/* ungate blocks so that suspend can properly shut them down */
 		if (i != AMD_IP_BLOCK_TYPE_SMC) {
-			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-									    AMD_CG_STATE_UNGATE);
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+										     AMD_CG_STATE_UNGATE);
 			if (r) {
-				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
 			}
 		}
 		/* XXX handle errors */
-		r = adev->ip_blocks[i].funcs->suspend(adev);
+		r = adev->ip_blocks[i].version->funcs->suspend(adev);
 		/* XXX handle errors */
 		if (r) {
-			DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("suspend of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 		}
 	}
 
@@ -1544,11 +1564,12 @@ static int amdgpu_resume(struct amdgpu_device *adev)
 	int i, r;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		r = adev->ip_blocks[i].funcs->resume(adev);
+		r = adev->ip_blocks[i].version->funcs->resume(adev);
 		if (r) {
-			DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("resume of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
 	}
@@ -1859,8 +1880,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	amdgpu_fence_driver_fini(adev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_fini(adev);
-	kfree(adev->ip_block_status);
-	adev->ip_block_status = NULL;
 	adev->accel_working = false;
 	/* free i2c buses */
 	amdgpu_i2c_fini(adev);
@@ -1956,7 +1975,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 
 	r = amdgpu_suspend(adev);
 
-	/* evict remaining vram memory */
+	/* Evict remaining VRAM memory.
+	 * This second call is needed to evict the GART page table
+	 * using the CPU.
+	 */
 	amdgpu_bo_evict_vram(adev);
 
 	pci_save_state(dev->pdev);
@@ -2096,13 +2118,13 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
 	bool asic_hang = false;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].funcs->check_soft_reset)
-			adev->ip_block_status[i].hang =
-				adev->ip_blocks[i].funcs->check_soft_reset(adev);
-		if (adev->ip_block_status[i].hang) {
-			DRM_INFO("IP block:%d is hang!\n", i);
+		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
+			adev->ip_blocks[i].status.hang =
+				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+		if (adev->ip_blocks[i].status.hang) {
+			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
 			asic_hang = true;
 		}
 	}
@@ -2114,11 +2136,11 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_block_status[i].hang &&
-		    adev->ip_blocks[i].funcs->pre_soft_reset) {
-			r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+		if (adev->ip_blocks[i].status.hang &&
+		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
+			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
 			if (r)
 				return r;
 		}
@@ -2132,13 +2154,13 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
-		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
-		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
-		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
-			if (adev->ip_block_status[i].hang) {
+		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
+		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
+		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
+		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+			if (adev->ip_blocks[i].status.hang) {
 				DRM_INFO("Some block need full reset!\n");
 				return true;
 			}
@@ -2152,11 +2174,11 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_block_status[i].hang &&
-		    adev->ip_blocks[i].funcs->soft_reset) {
-			r = adev->ip_blocks[i].funcs->soft_reset(adev);
+		if (adev->ip_blocks[i].status.hang &&
+		    adev->ip_blocks[i].version->funcs->soft_reset) {
+			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
 			if (r)
 				return r;
 		}
@@ -2170,11 +2192,11 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_block_status[i].hang &&
-		    adev->ip_blocks[i].funcs->post_soft_reset)
-			r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+		if (adev->ip_blocks[i].status.hang &&
+		    adev->ip_blocks[i].version->funcs->post_soft_reset)
+			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
 		if (r)
 			return r;
 	}
@@ -2531,6 +2553,13 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 		se_bank = (*pos >> 24) & 0x3FF;
 		sh_bank = (*pos >> 34) & 0x3FF;
 		instance_bank = (*pos >> 44) & 0x3FF;
+
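+		/* 0x3FF in an encoded bank field means "all", which
+		 * amdgpu_gfx_select_se_sh() expects as 0xFFFFFFFF
+		 */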
+		if (se_bank == 0x3FF)
+			se_bank = 0xFFFFFFFF;
+		if (sh_bank == 0x3FF)
+			sh_bank = 0xFFFFFFFF;
+		if (instance_bank == 0x3FF)
+			instance_bank = 0xFFFFFFFF;
 		use_bank = 1;
 	} else {
 		use_bank = 0;
@@ -2539,8 +2568,8 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 	*pos &= 0x3FFFF;
 
 	if (use_bank) {
-		if (sh_bank >= adev->gfx.config.max_sh_per_se ||
-		    se_bank >= adev->gfx.config.max_shader_engines)
+		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
 			return -EINVAL;
 		mutex_lock(&adev->grbm_idx_mutex);
 		amdgpu_gfx_select_se_sh(adev, se_bank,
@@ -2587,10 +2616,45 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 	struct amdgpu_device *adev = f->f_inode->i_private;
 	ssize_t result = 0;
 	int r;
+	bool pm_pg_lock, use_bank;
+	unsigned instance_bank, sh_bank, se_bank;
 
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	/* are we writing registers for which a PG lock is necessary? */
+	pm_pg_lock = (*pos >> 23) & 1;
+
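+	/* Bank selection mirrors the read path above: bit 62 requests
+	 * banking, the 10 bit wide SE/SH/instance fields start at bits
+	 * 24/34/44 and a field value of 0x3FF selects all of them.
+	 */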
+	if (*pos & (1ULL << 62)) {
+		se_bank = (*pos >> 24) & 0x3FF;
+		sh_bank = (*pos >> 34) & 0x3FF;
+		instance_bank = (*pos >> 44) & 0x3FF;
+
+		if (se_bank == 0x3FF)
+			se_bank = 0xFFFFFFFF;
+		if (sh_bank == 0x3FF)
+			sh_bank = 0xFFFFFFFF;
+		if (instance_bank == 0x3FF)
+			instance_bank = 0xFFFFFFFF;
+		use_bank = 1;
+	} else {
+		use_bank = 0;
+	}
+
+	*pos &= 0x3FFFF;
+
+	if (use_bank) {
+		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+			return -EINVAL;
+		mutex_lock(&adev->grbm_idx_mutex);
+		amdgpu_gfx_select_se_sh(adev, se_bank,
+					sh_bank, instance_bank);
+	}
+
+	if (pm_pg_lock)
+		mutex_lock(&adev->pm.mutex);
+
 	while (size) {
 		uint32_t value;
 
@@ -2609,6 +2673,14 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 		size -= 4;
 	}
 
+	if (use_bank) {
+		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+	}
+
+	if (pm_pg_lock)
+		mutex_unlock(&adev->pm.mutex);
+
 	return result;
 }
 
@@ -2871,6 +2943,56 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 	return !r ? 4 : r;
 }
 
+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+					size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	int r, x;
+	ssize_t result = 0;
+	uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+	if (size & 3 || *pos & 3)
+		return -EINVAL;
+
+	/* decode offset */
+	offset = (*pos & 0x7F);
+	se = ((*pos >> 7) & 0xFF);
+	sh = ((*pos >> 15) & 0xFF);
+	cu = ((*pos >> 23) & 0xFF);
+	wave = ((*pos >> 31) & 0xFF);
+	simd = ((*pos >> 37) & 0xFF);
+
+	/* switch to the specific se/sh/cu */
+	mutex_lock(&adev->grbm_idx_mutex);
+	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+	x = 0;
+	if (adev->gfx.funcs->read_wave_data)
+		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+
+	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	if (!x)
+		return -EINVAL;
+
+	while (size && (offset < x * 4)) {
+		uint32_t value;
+
+		value = data[offset >> 2];
+		r = put_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		result += 4;
+		buf += 4;
+		offset += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
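+/*
+ * Usage sketch: userspace packs the target into the file offset (byte
+ * offset into the wave data in the low bits, then SE/SH/CU/wave/simd
+ * fields as decoded above), e.g. via pread() on the amdgpu_wave debugfs
+ * file, and reads back the dwords collected by the ASIC specific
+ * read_wave_data() callback.
+ */
+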
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 	.owner = THIS_MODULE,
 	.read = amdgpu_debugfs_regs_read,
@@ -2908,6 +3030,12 @@ static const struct file_operations amdgpu_debugfs_sensors_fops = {
 	.llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_wave_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_debugfs_wave_read,
+	.llseek = default_llseek
+};
+
 static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_fops,
 	&amdgpu_debugfs_regs_didt_fops,
@@ -2915,6 +3043,7 @@ static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_smc_fops,
 	&amdgpu_debugfs_gca_config_fops,
 	&amdgpu_debugfs_sensors_fops,
+	&amdgpu_debugfs_wave_fops,
 };
 
 static const char *debugfs_regs_names[] = {
@@ -2924,6 +3053,7 @@ static const char *debugfs_regs_names[] = {
 	"amdgpu_regs_smc",
 	"amdgpu_gca_config",
 	"amdgpu_sensors",
+	"amdgpu_wave",
 };
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 083e2b42987272d96f57fa091f92426651751a61..c7bc2b3c1b972ea798b163068e556074352833bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -68,9 +68,9 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	struct amdgpu_flip_work *work =
 		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
 	struct amdgpu_device *adev = work->adev;
-	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
+	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
 
-	struct drm_crtc *crtc = &amdgpuCrtc->base;
+	struct drm_crtc *crtc = &amdgpu_crtc->base;
 	unsigned long flags;
 	unsigned i;
 	int vpos, hpos;
@@ -85,14 +85,14 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	/* Wait until we're out of the vertical blank period before the one
 	 * targeted by the flip
 	 */
-	if (amdgpuCrtc->enabled &&
+	if (amdgpu_crtc->enabled &&
 	    (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
 					&vpos, &hpos, NULL, NULL,
 					&crtc->hwmode)
 	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
 	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
 	    (int)(work->target_vblank -
-		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
 		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
 		return;
 	}
@@ -104,12 +104,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
 
 	/* Set the flip status */
-	amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 
 
 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
-					 amdgpuCrtc->crtc_id, amdgpuCrtc, work);
+					 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
 
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 14f57d9915e3fc0aa8d5a8e77b734073f05ecb9b..6ca0333ca4c0f5398cfac5197c216164b39a49b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -553,9 +553,10 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
 				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
 					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
 			}
-			for (i = 0; i < states->numEntries; i++) {
-				if (i >= AMDGPU_MAX_VCE_LEVELS)
-					break;
+			adev->pm.dpm.num_of_vce_states =
+					states->numEntries > AMD_MAX_VCE_LEVELS ?
+					AMD_MAX_VCE_LEVELS : states->numEntries;
+			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 				vce_clk = (VCEClockInfo *)
 					((u8 *)&array->entries[0] +
 					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
@@ -955,3 +956,12 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
 
 	return encoded_lanes[lanes];
 }
+
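+/**
+ * amdgpu_get_vce_clock_state - get the VCE clock state at the given index
+ *
+ * @adev: amdgpu_device pointer
+ * @idx: state index
+ *
+ * Returns the entry from adev->pm.dpm.vce_states or NULL when @idx is out
+ * of range; meant to back the get_vce_clock_state hook in amdgpu_dpm_funcs.
+ */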
+struct amd_vce_state *
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+{
+	if (idx < adev->pm.dpm.num_of_vce_states)
+		return &adev->pm.dpm.vce_states[idx];
+
+	return NULL;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 3738a96c2619dd2f0743116de975079733742903..bd85e35998e7cc2999b19519fd646e9a7578ecac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -23,6 +23,446 @@
 #ifndef __AMDGPU_DPM_H__
 #define __AMDGPU_DPM_H__
 
+enum amdgpu_int_thermal_type {
+	THERMAL_TYPE_NONE,
+	THERMAL_TYPE_EXTERNAL,
+	THERMAL_TYPE_EXTERNAL_GPIO,
+	THERMAL_TYPE_RV6XX,
+	THERMAL_TYPE_RV770,
+	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
+	THERMAL_TYPE_EVERGREEN,
+	THERMAL_TYPE_SUMO,
+	THERMAL_TYPE_NI,
+	THERMAL_TYPE_SI,
+	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
+	THERMAL_TYPE_CI,
+	THERMAL_TYPE_KV,
+};
+
+enum amdgpu_dpm_auto_throttle_src {
+	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
+	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
+};
+
+enum amdgpu_dpm_event_src {
+	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
+	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
+	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
+	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+	AMDGPU_DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
+};
+
+struct amdgpu_ps {
+	u32 caps; /* vbios flags */
+	u32 class; /* vbios flags */
+	u32 class2; /* vbios flags */
+	/* UVD clocks */
+	u32 vclk;
+	u32 dclk;
+	/* VCE clocks */
+	u32 evclk;
+	u32 ecclk;
+	bool vce_active;
+	enum amd_vce_level vce_level;
+	/* asic priv */
+	void *ps_priv;
+};
+
+struct amdgpu_dpm_thermal {
+	/* thermal interrupt work */
+	struct work_struct work;
+	/* low temperature threshold */
+	int                min_temp;
+	/* high temperature threshold */
+	int                max_temp;
+	/* was last interrupt low to high or high to low */
+	bool               high_to_low;
+	/* interrupt source */
+	struct amdgpu_irq_src	irq;
+};
+
+enum amdgpu_clk_action {
+	AMDGPU_SCLK_UP = 1,
+	AMDGPU_SCLK_DOWN
+};
+
+struct amdgpu_blacklist_clocks {
+	u32 sclk;
+	u32 mclk;
+	enum amdgpu_clk_action action;
+};
+
+struct amdgpu_clock_and_voltage_limits {
+	u32 sclk;
+	u32 mclk;
+	u16 vddc;
+	u16 vddci;
+};
+
+struct amdgpu_clock_array {
+	u32 count;
+	u32 *values;
+};
+
+struct amdgpu_clock_voltage_dependency_entry {
+	u32 clk;
+	u16 v;
+};
+
+struct amdgpu_clock_voltage_dependency_table {
+	u32 count;
+	struct amdgpu_clock_voltage_dependency_entry *entries;
+};
+
+union amdgpu_cac_leakage_entry {
+	struct {
+		u16 vddc;
+		u32 leakage;
+	};
+	struct {
+		u16 vddc1;
+		u16 vddc2;
+		u16 vddc3;
+	};
+};
+
+struct amdgpu_cac_leakage_table {
+	u32 count;
+	union amdgpu_cac_leakage_entry *entries;
+};
+
+struct amdgpu_phase_shedding_limits_entry {
+	u16 voltage;
+	u32 sclk;
+	u32 mclk;
+};
+
+struct amdgpu_phase_shedding_limits_table {
+	u32 count;
+	struct amdgpu_phase_shedding_limits_entry *entries;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_entry {
+	u32 vclk;
+	u32 dclk;
+	u16 v;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_table {
+	u8 count;
+	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_entry {
+	u32 ecclk;
+	u32 evclk;
+	u16 v;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_table {
+	u8 count;
+	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_ppm_table {
+	u8 ppm_design;
+	u16 cpu_core_number;
+	u32 platform_tdp;
+	u32 small_ac_platform_tdp;
+	u32 platform_tdc;
+	u32 small_ac_platform_tdc;
+	u32 apu_tdp;
+	u32 dgpu_tdp;
+	u32 dgpu_ulv_power;
+	u32 tj_max;
+};
+
+struct amdgpu_cac_tdp_table {
+	u16 tdp;
+	u16 configurable_tdp;
+	u16 tdc;
+	u16 battery_power_limit;
+	u16 small_power_limit;
+	u16 low_cac_leakage;
+	u16 high_cac_leakage;
+	u16 maximum_power_delivery_limit;
+};
+
+struct amdgpu_dpm_dynamic_state {
+	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
+	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
+	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
+	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
+	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
+	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
+	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
+	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
+	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
+	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
+	struct amdgpu_clock_array valid_sclk_values;
+	struct amdgpu_clock_array valid_mclk_values;
+	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
+	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
+	u32 mclk_sclk_ratio;
+	u32 sclk_mclk_delta;
+	u16 vddc_vddci_delta;
+	u16 min_vddc_for_pcie_gen2;
+	struct amdgpu_cac_leakage_table cac_leakage_table;
+	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
+	struct amdgpu_ppm_table *ppm_table;
+	struct amdgpu_cac_tdp_table *cac_tdp_table;
+};
+
+struct amdgpu_dpm_fan {
+	u16 t_min;
+	u16 t_med;
+	u16 t_high;
+	u16 pwm_min;
+	u16 pwm_med;
+	u16 pwm_high;
+	u8 t_hyst;
+	u32 cycle_delay;
+	u16 t_max;
+	u8 control_mode;
+	u16 default_max_fan_pwm;
+	u16 default_fan_output_sensitivity;
+	u16 fan_output_sensitivity;
+	bool ucode_fan_control;
+};
+
+enum amdgpu_pcie_gen {
+	AMDGPU_PCIE_GEN1 = 0,
+	AMDGPU_PCIE_GEN2 = 1,
+	AMDGPU_PCIE_GEN3 = 2,
+	AMDGPU_PCIE_GEN_INVALID = 0xffff
+};
+
+enum amdgpu_dpm_forced_level {
+	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
+	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
+	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
+	AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
+};
+
+struct amdgpu_dpm_funcs {
+	int (*get_temperature)(struct amdgpu_device *adev);
+	int (*pre_set_power_state)(struct amdgpu_device *adev);
+	int (*set_power_state)(struct amdgpu_device *adev);
+	void (*post_set_power_state)(struct amdgpu_device *adev);
+	void (*display_configuration_changed)(struct amdgpu_device *adev);
+	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
+	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
+	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
+	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
+	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
+	bool (*vblank_too_short)(struct amdgpu_device *adev);
+	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
+	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
+	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
+	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
+	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
+	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
+	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+	int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+	int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+	int (*get_sclk_od)(struct amdgpu_device *adev);
+	int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+	int (*get_mclk_od)(struct amdgpu_device *adev);
+	int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
+	int (*check_state_equal)(struct amdgpu_device *adev,
+				struct amdgpu_ps *cps,
+				struct amdgpu_ps *rps,
+				bool *equal);
+
+	struct amd_vce_state *(*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
+};
+
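+/* These wrappers pick the powerplay implementation when pp_enabled is set
+ * and the legacy callbacks in adev->pm.funcs otherwise; wrappers without
+ * a pp_enabled check only have one backend to call.
+ */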
+#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
+#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
+#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
+#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
+#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
+#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
+#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+	((adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+		-EINVAL)
+
+#define amdgpu_dpm_get_temperature(adev) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
+	      (adev)->pm.funcs->get_temperature((adev)))
+
+#define amdgpu_dpm_set_fan_control_mode(adev, m) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
+	      (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
+
+#define amdgpu_dpm_get_fan_control_mode(adev) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
+	      (adev)->pm.funcs->get_fan_control_mode((adev)))
+
+#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+	      (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+	      (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
+
+#define amdgpu_dpm_get_sclk(adev, l) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
+		(adev)->pm.funcs->get_sclk((adev), (l)))
+
+#define amdgpu_dpm_get_mclk(adev, l)  \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
+	      (adev)->pm.funcs->get_mclk((adev), (l)))
+
+
+#define amdgpu_dpm_force_performance_level(adev, l) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
+	      (adev)->pm.funcs->force_performance_level((adev), (l)))
+
+#define amdgpu_dpm_powergate_uvd(adev, g) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
+	      (adev)->pm.funcs->powergate_uvd((adev), (g)))
+
+#define amdgpu_dpm_powergate_vce(adev, g) \
+	((adev)->pp_enabled ?						\
+	      (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
+	      (adev)->pm.funcs->powergate_vce((adev), (g)))
+
+#define amdgpu_dpm_get_current_power_state(adev) \
+	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_performance_level(adev) \
+	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_pp_num_states(adev, data) \
+	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+
+#define amdgpu_dpm_get_pp_table(adev, table) \
+	(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+
+#define amdgpu_dpm_set_pp_table(adev, buf, size) \
+	(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+
+#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
+	(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+
+#define amdgpu_dpm_force_clock_level(adev, type, level) \
+		(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+
+#define amdgpu_dpm_get_sclk_od(adev) \
+	(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+	(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+	((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+	((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
+#define amdgpu_dpm_dispatch_task(adev, event_id, input, output)		\
+	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
+
+#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps), (rps), (equal))
+
+#define amdgpu_dpm_get_vce_clock_state(adev, i)				\
+	((adev)->pp_enabled ?						\
+	 (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
+	 (adev)->pm.funcs->get_vce_clock_state((adev), (i)))
+
+struct amdgpu_dpm {
+	struct amdgpu_ps        *ps;
+	/* number of valid power states */
+	int                     num_ps;
+	/* current power state that is active */
+	struct amdgpu_ps        *current_ps;
+	/* requested power state */
+	struct amdgpu_ps        *requested_ps;
+	/* boot up power state */
+	struct amdgpu_ps        *boot_ps;
+	/* default uvd power state */
+	struct amdgpu_ps        *uvd_ps;
+	/* vce requirements */
+	u32                  num_of_vce_states;
+	struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
+	enum amd_vce_level vce_level;
+	enum amd_pm_state_type state;
+	enum amd_pm_state_type user_state;
+	enum amd_pm_state_type last_state;
+	enum amd_pm_state_type last_user_state;
+	u32                     platform_caps;
+	u32                     voltage_response_time;
+	u32                     backbias_response_time;
+	void                    *priv;
+	u32			new_active_crtcs;
+	int			new_active_crtc_count;
+	u32			current_active_crtcs;
+	int			current_active_crtc_count;
+	struct amdgpu_dpm_dynamic_state dyn_state;
+	struct amdgpu_dpm_fan fan;
+	u32 tdp_limit;
+	u32 near_tdp_limit;
+	u32 near_tdp_limit_adjusted;
+	u32 sq_ramping_threshold;
+	u32 cac_leakage;
+	u16 tdp_od_limit;
+	u32 tdp_adjustment;
+	u16 load_line_slope;
+	bool power_control;
+	bool ac_power;
+	/* special states active */
+	bool                    thermal_active;
+	bool                    uvd_active;
+	bool                    vce_active;
+	/* thermal handling */
+	struct amdgpu_dpm_thermal thermal;
+	/* forced levels */
+	enum amdgpu_dpm_forced_level forced_level;
+};
+
+struct amdgpu_pm {
+	struct mutex		mutex;
+	u32                     current_sclk;
+	u32                     current_mclk;
+	u32                     default_sclk;
+	u32                     default_mclk;
+	struct amdgpu_i2c_chan *i2c_bus;
+	/* internal thermal controller on rv6xx+ */
+	enum amdgpu_int_thermal_type int_thermal_type;
+	struct device	        *int_hwmon_dev;
+	/* fan control parameters */
+	bool                    no_fan;
+	u8                      fan_pulses_per_revolution;
+	u8                      fan_min_rpm;
+	u8                      fan_max_rpm;
+	/* dpm */
+	bool                    dpm_enabled;
+	bool                    sysfs_initialized;
+	struct amdgpu_dpm       dpm;
+	const struct firmware	*fw;	/* SMC firmware */
+	uint32_t                fw_version;
+	const struct amdgpu_dpm_funcs *funcs;
+	uint32_t                pcie_gen_mask;
+	uint32_t                pcie_mlw_mask;
+	struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
+};
+
 #define R600_SSTU_DFLT                               0
 #define R600_SST_DFLT                                0x00C8
 
@@ -82,4 +522,7 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
 				 u16 default_lanes);
 u8 amdgpu_encode_pci_lane_width(u32 lanes);
 
+struct amd_vce_state *
+amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 71ed27eb3ddebd3d2463cc136fb268db74aceb60..6bb4d9e9afe4e5eb3ec538da12c3193618f245dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -58,9 +58,10 @@
  * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
  * - 3.7.0 - Add support for VCE clock list packet
  * - 3.8.0 - Add support raster config init in the kernel
+ * - 3.9.0 - Add support for querying VRAM and GTT memory info.
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	8
+#define KMS_DRIVER_MINOR	9
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -85,6 +86,7 @@ int amdgpu_vm_size = 64;
 int amdgpu_vm_block_size = -1;
 int amdgpu_vm_fault_stop = 0;
 int amdgpu_vm_debug = 0;
+int amdgpu_vram_page_split = 1024;
 int amdgpu_exp_hw_support = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
@@ -165,6 +167,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
 MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
 module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
 
+MODULE_PARM_DESC(vram_page_split, "Number of pages after which VRAM allocations are split (default 1024, -1 = disable)");
+module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
+
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
@@ -201,7 +206,8 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
 MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
 module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
 
-MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
+MODULE_PARM_DESC(virtual_display,
+		 "Enable virtual display feature (set as a semicolon separated list of PCI-address,CRTC-count pairs, e.g. xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
 module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
 
 static const struct pci_device_id pciidlist[] = {
@@ -381,6 +387,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	/* fiji */
 	{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
+	{0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
 	/* carrizo */
 	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
 	{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 8d01aa24d68afd75004d1e2f0f41dcf3bd80a12c..38bdc2d300a31d9f2d292f1f1fd920b4f67a1a6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -152,7 +152,8 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = amdgpu_gem_object_create(adev, aligned_size, 0,
 				       AMDGPU_GEM_DOMAIN_VRAM,
-				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				       AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 				       true, &gobj);
 	if (ret) {
 		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 21a1242fc13b02ed5b6de3a962176500f127f7b2..964d2a946ed5514278f09642cbec0bf3439724d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -126,7 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 	if (adev->gart.robj == NULL) {
 		r = amdgpu_bo_create(adev, adev->gart.table_size,
 				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 				     NULL, NULL, &adev->gart.robj);
 		if (r) {
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 3ad0bf6ce3e42bad845ce01a5512a833996ecbc5..cd62f6ffde2ac770c96596674396687cef62f804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -116,10 +116,11 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
  * Call from drm_gem_handle_create which appear in both new and open ioctl
  * case.
  */
-int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+int amdgpu_gem_object_open(struct drm_gem_object *obj,
+			   struct drm_file *file_priv)
 {
 	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = abo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
@@ -142,7 +143,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 			     struct drm_file *file_priv)
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 
@@ -468,6 +469,16 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
 	return r;
 }
 
+static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
+{
+	unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+	/* if anything is swapped out don't swap it in here,
+	   just abort and wait for the next CS */
+
+	return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
+}
+
 /**
  * amdgpu_gem_va_update_vm -update the bo_va in its VM
  *
@@ -478,7 +489,8 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
  * vital here, so they are not reported back to userspace.
  */
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
-				    struct amdgpu_bo_va *bo_va, uint32_t operation)
+				    struct amdgpu_bo_va *bo_va,
+				    uint32_t operation)
 {
 	struct ttm_validate_buffer tv, *entry;
 	struct amdgpu_bo_list_entry vm_pd;
@@ -501,7 +513,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	if (r)
 		goto error_print;
 
-	amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
 	list_for_each_entry(entry, &list, head) {
 		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
 		/* if anything is swapped out don't swap it in here,
@@ -509,13 +520,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		if (domain == AMDGPU_GEM_DOMAIN_CPU)
 			goto error_unreserve;
 	}
-	list_for_each_entry(entry, &duplicates, head) {
-		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
-		/* if anything is swapped out don't swap it in here,
-		   just abort and wait for the next CS */
-		if (domain == AMDGPU_GEM_DOMAIN_CPU)
-			goto error_unreserve;
-	}
+	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
+				      NULL);
+	if (r)
+		goto error_unreserve;
 
 	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
 	if (r)
@@ -536,8 +544,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
 
-
-
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp)
 {
@@ -547,7 +553,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *abo;
 	struct amdgpu_bo_va *bo_va;
-	struct ttm_validate_buffer tv, tv_pd;
+	struct amdgpu_bo_list_entry vm_pd;
+	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head list, duplicates;
 	uint32_t invalid_flags, va_flags = 0;
@@ -592,9 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	tv.shared = true;
 	list_add(&tv.head, &list);
 
-	tv_pd.bo = &fpriv->vm.page_directory->tbo;
-	tv_pd.shared = true;
-	list_add(&tv_pd.head, &list);
+	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
 	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index a074edd95c701c2705e2323bfaf713692a852654..01a42b6a69a4bb8b3b1653e57bf28da6cb5911e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -24,6 +24,7 @@
  */
 #include <drm/drmP.h>
 #include "amdgpu.h"
+#include "amdgpu_gfx.h"
 
 /*
  * GPU scratch registers helpers function.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 51321e154c09997037cf51ffa8bad5a0a90f5e67..e020440864450130ed0b2a017b68214f352c5126 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -27,6 +27,7 @@
 int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
 void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
 
-unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
+		unsigned max_sh);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f86c84427778aecb1ccf2cefd61afb95af3ebf38..3c634f02a3d5b259bbc4f94a8cdf054e8e35457a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -168,6 +168,8 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 		return -ENOMEM;
 
 	node->start = AMDGPU_BO_INVALID_OFFSET;
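+	/* this is a dummy node, but the buffer move code reads the size */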
+	node->size = mem->num_pages;
 	mem->mm_node = node;
 
 	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 6a6c86c9c1694eb475b9671c47cfaa4738473ed8..16308eb22e7ff8954c4131afb558e4e62617c834 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return -EINVAL;
 	}
 
-	alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
-		num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+	alloc_size = ring->funcs->emit_frame_size + num_ibs *
+		ring->funcs->emit_ib_size;
 
 	r = amdgpu_ring_alloc(ring, alloc_size);
 	if (r) {
@@ -161,7 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return r;
 	}
 
-	if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
+	if (ring->funcs->init_cond_exec)
 		patch_offset = amdgpu_ring_init_cond_exec(ring);
 
 	if (vm) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index c2c7fb140338061f77cc3d2560e2f99f46e37346..78392671046a06959119fda41542be2e1729e169 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -306,10 +306,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		}
 
 		for (i = 0; i < adev->num_ip_blocks; i++) {
-			if (adev->ip_blocks[i].type == type &&
-			    adev->ip_block_status[i].valid) {
-				ip.hw_ip_version_major = adev->ip_blocks[i].major;
-				ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
+			if (adev->ip_blocks[i].version->type == type &&
+			    adev->ip_blocks[i].status.valid) {
+				ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
+				ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
 				ip.capabilities_flags = 0;
 				ip.available_rings = ring_mask;
 				ip.ib_start_alignment = ib_start_alignment;
@@ -345,8 +345,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		}
 
 		for (i = 0; i < adev->num_ip_blocks; i++)
-			if (adev->ip_blocks[i].type == type &&
-			    adev->ip_block_status[i].valid &&
+			if (adev->ip_blocks[i].version->type == type &&
+			    adev->ip_blocks[i].status.valid &&
 			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
 				count++;
 
@@ -411,6 +411,37 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		return copy_to_user(out, &vram_gtt,
 				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
 	}
+	case AMDGPU_INFO_MEMORY: {
+		struct drm_amdgpu_memory_info mem;
+
+		memset(&mem, 0, sizeof(mem));
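+		/* a single allocation is limited to 3/4 of the usable heap */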
+		mem.vram.total_heap_size = adev->mc.real_vram_size;
+		mem.vram.usable_heap_size =
+			adev->mc.real_vram_size - adev->vram_pin_size;
+		mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+
+		mem.cpu_accessible_vram.total_heap_size =
+			adev->mc.visible_vram_size;
+		mem.cpu_accessible_vram.usable_heap_size =
+			adev->mc.visible_vram_size -
+			(adev->vram_pin_size - adev->invisible_pin_size);
+		mem.cpu_accessible_vram.heap_usage =
+			atomic64_read(&adev->vram_vis_usage);
+		mem.cpu_accessible_vram.max_allocation =
+			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
+
+		mem.gtt.total_heap_size = adev->mc.gtt_size;
+		mem.gtt.usable_heap_size =
+			adev->mc.gtt_size - adev->gart_pin_size;
+		mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+
+		return copy_to_user(out, &mem,
+				    min((size_t)size, sizeof(mem)))
+				    ? -EFAULT : 0;
+	}
 	case AMDGPU_INFO_READ_MMR_REG: {
 		unsigned n, alloc_size;
 		uint32_t *regs;
@@ -475,6 +506,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		dev_info.ids_flags = 0;
 		if (adev->flags & AMD_IS_APU)
 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
+		if (amdgpu_sriov_vf(adev))
+			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
 		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
 		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
@@ -494,6 +527,25 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		return copy_to_user(out, &dev_info,
 				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
 	}
+	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
+		unsigned i;
+		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
+		struct amd_vce_state *vce_state;
+
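+		/* report only the clock states the DPM code exposes for VCE */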
+		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
+			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
+			if (vce_state) {
+				vce_clk_table.entries[i].sclk = vce_state->sclk;
+				vce_clk_table.entries[i].mclk = vce_state->mclk;
+				vce_clk_table.entries[i].eclk = vce_state->evclk;
+				vce_clk_table.num_valid_entries++;
+			}
+		}
+
+		return copy_to_user(out, &vce_clk_table,
+				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
+	}
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 32fa7b7913f787263548299e9dc5e7c788ca41a4..7ea3cacf9f9fb14f48e203b7cfabadf98e39e36d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -285,7 +285,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 {
 	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_mn *rmn;
 	struct amdgpu_mn_node *node = NULL;
 	struct list_head bos;
@@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
  */
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_mn *rmn;
 	struct list_head *head;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 7b0eff7d060b82faba0945499ccdaac06ea255da..1e23334b07fb7d1d06896d692cf9c7e8f21b6346 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -341,8 +341,6 @@ struct amdgpu_mode_info {
 	int			num_dig; /* number of dig blocks */
 	int			disp_priority;
 	const struct amdgpu_display_funcs *funcs;
-	struct hrtimer vblank_timer;
-	enum amdgpu_interrupt_state vsync_timer_enabled;
 };
 
 #define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -413,6 +411,9 @@ struct amdgpu_crtc {
 	u32 wm_high;
 	u32 lb_vblank_lead_lines;
 	struct drm_display_mode hw_mode;
+	/* for virtual dce */
+	struct hrtimer vblank_timer;
+	enum amdgpu_interrupt_state vsync_timer_enabled;
 };
 
 struct amdgpu_encoder_atom_dig {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index aa074fac0c7f66ef796b2da0704c314c96f2b5d5..6efa8d73b394204aebe0736ed5a9558f256aaa4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -88,18 +88,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
 
 static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	struct amdgpu_bo *bo;
 
 	bo = container_of(tbo, struct amdgpu_bo, tbo);
 
-	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
+	amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
 
 	drm_gem_object_release(&bo->gem_base);
 	amdgpu_bo_unref(&bo->parent);
 	if (!list_empty(&bo->shadow_list)) {
-		mutex_lock(&bo->adev->shadow_list_lock);
+		mutex_lock(&adev->shadow_list_lock);
 		list_del_init(&bo->shadow_list);
-		mutex_unlock(&bo->adev->shadow_list_lock);
+		mutex_unlock(&adev->shadow_list_lock);
 	}
 	kfree(bo->metadata);
 	kfree(bo);
@@ -121,12 +122,17 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 
 	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
 		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+		unsigned lpfn = 0;
+
+		/* This forces a reallocation if the flag wasn't set before */
+		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+			lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
 
 		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
 		    !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
 		    adev->mc.visible_vram_size < adev->mc.real_vram_size) {
 			places[c].fpfn = visible_pfn;
-			places[c].lpfn = 0;
+			places[c].lpfn = lpfn;
 			places[c].flags = TTM_PL_FLAG_WC |
 				TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
 				TTM_PL_FLAG_TOPDOWN;
@@ -134,7 +140,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 		}
 
 		places[c].fpfn = 0;
-		places[c].lpfn = 0;
+		places[c].lpfn = lpfn;
 		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 			TTM_PL_FLAG_VRAM;
 		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
@@ -205,8 +211,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
-	amdgpu_ttm_placement_init(abo->adev, &abo->placement,
-				  abo->placements, domain, abo->flags);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+
+	amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
+				  domain, abo->flags);
 }
 
 static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -245,7 +253,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 	int r;
 
 	r = amdgpu_bo_create(adev, size, align, true, domain,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, bo_ptr);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
@@ -351,7 +360,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 		kfree(bo);
 		return r;
 	}
-	bo->adev = adev;
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
 	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@@ -616,6 +624,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 			     u64 min_offset, u64 max_offset,
 			     u64 *gpu_addr)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	int r, i;
 	unsigned fpfn, lpfn;
 
@@ -643,18 +652,20 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
 		return 0;
 	}
+
+	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	amdgpu_ttm_placement_from_domain(bo, domain);
 	for (i = 0; i < bo->placement.num_placement; i++) {
 		/* force to pin into visible video ram */
 		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
 		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
 		    (!max_offset || max_offset >
-		     bo->adev->mc.visible_vram_size)) {
+		     adev->mc.visible_vram_size)) {
 			if (WARN_ON_ONCE(min_offset >
-					 bo->adev->mc.visible_vram_size))
+					 adev->mc.visible_vram_size))
 				return -EINVAL;
 			fpfn = min_offset >> PAGE_SHIFT;
-			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
+			lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
 		} else {
 			fpfn = min_offset >> PAGE_SHIFT;
 			lpfn = max_offset >> PAGE_SHIFT;
@@ -669,12 +680,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (unlikely(r)) {
-		dev_err(bo->adev->dev, "%p pin failed\n", bo);
+		dev_err(adev->dev, "%p pin failed\n", bo);
 		goto error;
 	}
 	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
 	if (unlikely(r)) {
-		dev_err(bo->adev->dev, "%p bind failed\n", bo);
+		dev_err(adev->dev, "%p bind failed\n", bo);
 		goto error;
 	}
 
@@ -682,11 +693,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	if (gpu_addr != NULL)
 		*gpu_addr = amdgpu_bo_gpu_offset(bo);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
-		bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+		adev->vram_pin_size += amdgpu_bo_size(bo);
 		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+			adev->invisible_pin_size += amdgpu_bo_size(bo);
 	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
-		bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+		adev->gart_pin_size += amdgpu_bo_size(bo);
 	}
 
 error:
@@ -700,10 +711,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
 
 int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	int r, i;
 
 	if (!bo->pin_count) {
-		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
+		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
 		return 0;
 	}
 	bo->pin_count--;
@@ -715,16 +727,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	}
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (unlikely(r)) {
-		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
 		goto error;
 	}
 
 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
-		bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+		adev->vram_pin_size -= amdgpu_bo_size(bo);
 		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+			adev->invisible_pin_size -= amdgpu_bo_size(bo);
 	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
-		bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+		adev->gart_pin_size -= amdgpu_bo_size(bo);
 	}
 
 error:
@@ -849,6 +861,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *new_mem)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 
@@ -856,21 +869,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 		return;
 
 	abo = container_of(bo, struct amdgpu_bo, tbo);
-	amdgpu_vm_bo_invalidate(abo->adev, abo);
+	amdgpu_vm_bo_invalidate(adev, abo);
 
 	/* update statistics */
 	if (!new_mem)
 		return;
 
 	/* move_notify is called before move happens */
-	amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
+	amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
 
 	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-	struct amdgpu_device *adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
 	unsigned long offset, size, lpfn;
 	int i, r;
@@ -879,13 +892,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 		return 0;
 
 	abo = container_of(bo, struct amdgpu_bo, tbo);
-	adev = abo->adev;
 	if (bo->mem.mem_type != TTM_PL_VRAM)
 		return 0;
 
 	size = bo->mem.num_pages << PAGE_SHIFT;
 	offset = bo->mem.start << PAGE_SHIFT;
-	if ((offset + size) <= adev->mc.visible_vram_size)
+	/* TODO: figure out how to map scattered VRAM to the CPU */
+	if ((offset + size) <= adev->mc.visible_vram_size &&
+	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
 		return 0;
 
 	/* Can't move a pinned BO to visible VRAM */
@@ -893,6 +907,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 		return -EINVAL;
 
 	/* hurrah the memory is not visible ! */
+	abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
 	lpfn =	adev->mc.visible_vram_size >> PAGE_SHIFT;
 	for (i = 0; i < abo->placement.num_placement; i++) {
@@ -954,6 +969,9 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
 		     !bo->pin_count);
 	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
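+	/* a VRAM BO must be contiguous to have a single GPU offset */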
+	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
 
 	return bo->tbo.offset;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 8255034d73eb10061c32a175b02b03f131587280..d3baf834ac24b8f7802689b8662f07b74ca9ed12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
  */
 static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	int r;
 
 	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
-			dev_err(bo->adev->dev, "%p reserve failed\n", bo);
+			dev_err(adev->dev, "%p reserve failed\n", bo);
 		return r;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index accc908bdc880b2bacdc5ba3486b624420a7e2f6..274f3309aec954505677735784684175c6f65a00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -986,10 +986,10 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
 
 static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 {
-	int i;
 	struct amdgpu_ps *ps;
 	enum amd_pm_state_type dpm_state;
 	int ret;
+	bool equal;
 
 	/* if dpm init failed */
 	if (!adev->pm.dpm_enabled)
@@ -1009,46 +1009,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	else
 		return;
 
-	/* no need to reprogram if nothing changed unless we are on BTC+ */
-	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
-		/* vce just modifies an existing state so force a change */
-		if (ps->vce_active != adev->pm.dpm.vce_active)
-			goto force;
-		if (adev->flags & AMD_IS_APU) {
-			/* for APUs if the num crtcs changed but state is the same,
-			 * all we need to do is update the display configuration.
-			 */
-			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
-				/* update display watermarks based on new power state */
-				amdgpu_display_bandwidth_update(adev);
-				/* update displays */
-				amdgpu_dpm_display_configuration_changed(adev);
-				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
-				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-			}
-			return;
-		} else {
-			/* for BTC+ if the num crtcs hasn't changed and state is the same,
-			 * nothing to do, if the num crtcs is > 1 and state is the same,
-			 * update display configuration.
-			 */
-			if (adev->pm.dpm.new_active_crtcs ==
-			    adev->pm.dpm.current_active_crtcs) {
-				return;
-			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
-				   (adev->pm.dpm.new_active_crtc_count > 1)) {
-				/* update display watermarks based on new power state */
-				amdgpu_display_bandwidth_update(adev);
-				/* update displays */
-				amdgpu_dpm_display_configuration_changed(adev);
-				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
-				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-				return;
-			}
-		}
-	}
-
-force:
 	if (amdgpu_dpm == 1) {
 		printk("switching from power state:\n");
 		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
@@ -1059,31 +1019,22 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	/* update whether vce is active */
 	ps->vce_active = adev->pm.dpm.vce_active;
 
+	amdgpu_dpm_display_configuration_changed(adev);
+
 	ret = amdgpu_dpm_pre_set_power_state(adev);
 	if (ret)
 		return;
 
-	/* update display watermarks based on new power state */
-	amdgpu_display_bandwidth_update(adev);
+	if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
+		equal = false;
 
-	/* wait for the rings to drain */
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (ring && ring->ready)
-			amdgpu_fence_wait_empty(ring);
-	}
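+	/* nothing to do if the requested state equals the current one */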
+	if (equal)
+		return;
 
-	/* program the new power state */
 	amdgpu_dpm_set_power_state(adev);
-
-	/* update current power state */
-	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;
-
 	amdgpu_dpm_post_set_power_state(adev);
 
-	/* update displays */
-	amdgpu_dpm_display_configuration_changed(adev);
-
 	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
 	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
 
@@ -1135,7 +1086,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 			mutex_lock(&adev->pm.mutex);
 			adev->pm.dpm.vce_active = true;
 			/* XXX select vce level based on ring/task */
-			adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
+			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
 			mutex_unlock(&adev->pm.mutex);
 		} else {
 			mutex_lock(&adev->pm.mutex);
@@ -1276,20 +1227,20 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 	struct drm_device *ddev = adev->ddev;
 	struct drm_crtc *crtc;
 	struct amdgpu_crtc *amdgpu_crtc;
+	int i = 0;
 
 	if (!adev->pm.dpm_enabled)
 		return;
 
-	if (adev->pp_enabled) {
-		int i = 0;
+	amdgpu_display_bandwidth_update(adev);
 
-		amdgpu_display_bandwidth_update(adev);
-		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-			struct amdgpu_ring *ring = adev->rings[i];
-			if (ring && ring->ready)
-				amdgpu_fence_wait_empty(ring);
-		}
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (ring && ring->ready)
+			amdgpu_fence_wait_empty(ring);
+	}
 
+	if (adev->pp_enabled) {
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
 	} else {
 		mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 7532ff822aa753496ef85971a2fef15db9b28832..fa6baf31a35ddae9d45e3b7be9fbd86ef22cc15e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -299,7 +299,7 @@ static int amdgpu_pp_soft_reset(void *handle)
 	return ret;
 }
 
-const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
+static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
 	.name = "amdgpu_powerplay",
 	.early_init = amdgpu_pp_early_init,
 	.late_init = amdgpu_pp_late_init,
@@ -316,3 +316,11 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
 	.set_clockgating_state = amdgpu_pp_set_clockgating_state,
 	.set_powergating_state = amdgpu_pp_set_powergating_state,
 };
+
+const struct amdgpu_ip_block_version amdgpu_pp_ip_block = {
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &amdgpu_pp_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
index da5cf47cfd99af563b67db388d53c174e76dddad..c0c4bfdcdb1458f06702cd276a8a6622d8470c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
@@ -23,11 +23,11 @@
  *
  */
 
-#ifndef __AMDGPU_POPWERPLAY_H__
-#define __AMDGPU_POPWERPLAY_H__
+#ifndef __AMDGPU_POWERPLAY_H__
+#define __AMDGPU_POWERPLAY_H__
 
 #include "amd_shared.h"
 
-extern const struct amd_ip_funcs amdgpu_pp_ip_funcs;
+extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
 
-#endif /* __AMDSOC_DM_H__ */
+#endif /* __AMDGPU_POWERPLAY_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 3cb5e903cd62896529d2ea18d5ea7f49c2feae73..4c992826d2d65313161807d44ef224e24d78c1bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
 {
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
-	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
 
 	/* Make sure we aren't trying to allocate more space
 	 * than the maximum for one submission
@@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 	int i;
 
 	for (i = 0; i < count; i++)
-		amdgpu_ring_write(ring, ring->nop);
+		amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /** amdgpu_ring_generic_pad_ib - pad IB with NOP packets
@@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  */
 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	while (ib->length_dw & ring->align_mask)
-		ib->ptr[ib->length_dw++] = ring->nop;
+	while (ib->length_dw & ring->funcs->align_mask)
+		ib->ptr[ib->length_dw++] = ring->funcs->nop;
 }
 
 /**
@@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
 	uint32_t count;
 
 	/* We pad to match fetch size */
-	count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
-	count %= ring->align_mask + 1;
+	count = ring->funcs->align_mask + 1 -
+		(ring->wptr & ring->funcs->align_mask);
+	count %= ring->funcs->align_mask + 1;
 	ring->funcs->insert_nop(ring, count);
 
 	mb();
@@ -163,9 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-		     unsigned max_dw, u32 nop, u32 align_mask,
-		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
-		     enum amdgpu_ring_type ring_type)
+		     unsigned max_dw, struct amdgpu_irq_src *irq_src,
+		     unsigned irq_type)
 {
 	int r;
 
@@ -216,9 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 
 	ring->ring_size = roundup_pow_of_two(max_dw * 4 *
 					     amdgpu_sched_hw_submission);
-	ring->align_mask = align_mask;
-	ring->nop = nop;
-	ring->type = ring_type;
 
 	/* Allocate ring buffer */
 	if (ring->ring_obj == NULL) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ee1b65d7eff14c5e706c114d7b00a17afd7d113
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_RING_H__
+#define __AMDGPU_RING_H__
+
+#include "gpu_scheduler.h"
+
+/* max number of rings */
+#define AMDGPU_MAX_RINGS		16
+#define AMDGPU_MAX_GFX_RINGS		1
+#define AMDGPU_MAX_COMPUTE_RINGS	8
+#define AMDGPU_MAX_VCE_RINGS		3
+
+/* some special values for the owner field */
+#define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
+#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
+
+#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
+#define AMDGPU_FENCE_FLAG_INT           (1 << 1)
+
+enum amdgpu_ring_type {
+	AMDGPU_RING_TYPE_GFX,
+	AMDGPU_RING_TYPE_COMPUTE,
+	AMDGPU_RING_TYPE_SDMA,
+	AMDGPU_RING_TYPE_UVD,
+	AMDGPU_RING_TYPE_VCE
+};
+
+struct amdgpu_device;
+struct amdgpu_ring;
+struct amdgpu_ib;
+struct amdgpu_cs_parser;
+
+/*
+ * Fences.
+ */
+struct amdgpu_fence_driver {
+	uint64_t			gpu_addr;
+	volatile uint32_t		*cpu_addr;
+	/* sync_seq is protected by ring emission lock */
+	uint32_t			sync_seq;
+	atomic_t			last_seq;
+	bool				initialized;
+	struct amdgpu_irq_src		*irq_src;
+	unsigned			irq_type;
+	struct timer_list		fallback_timer;
+	unsigned			num_fences_mask;
+	spinlock_t			lock;
+	struct fence			**fences;
+};
+
+int amdgpu_fence_driver_init(struct amdgpu_device *adev);
+void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
+void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
+
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+				  unsigned num_hw_submission);
+int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+				   struct amdgpu_irq_src *irq_src,
+				   unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
+void amdgpu_fence_process(struct amdgpu_ring *ring);
+int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
+
+/*
+ * Rings.
+ */
+
+/* provided by hw blocks that expose a ring buffer for commands */
+struct amdgpu_ring_funcs {
+	enum amdgpu_ring_type	type;
+	uint32_t		align_mask;
+	u32			nop;
+
+	/* ring read/write ptr handling */
+	u32 (*get_rptr)(struct amdgpu_ring *ring);
+	u32 (*get_wptr)(struct amdgpu_ring *ring);
+	void (*set_wptr)(struct amdgpu_ring *ring);
+	/* validating and patching of IBs */
+	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+	/* constants to calculate how many DW are needed for an emit */
+	unsigned emit_frame_size;
+	unsigned emit_ib_size;
+	/* command emit functions */
+	void (*emit_ib)(struct amdgpu_ring *ring,
+			struct amdgpu_ib *ib,
+			unsigned vm_id, bool ctx_switch);
+	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
+			   uint64_t seq, unsigned flags);
+	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
+	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+			      uint64_t pd_addr);
+	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
+	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
+				uint32_t gds_base, uint32_t gds_size,
+				uint32_t gws_base, uint32_t gws_size,
+				uint32_t oa_base, uint32_t oa_size);
+	/* testing functions */
+	int (*test_ring)(struct amdgpu_ring *ring);
+	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
+	/* insert NOP packets */
+	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
+	/* pad the indirect buffer to the necessary number of dw */
+	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
+	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+	/* note usage for clock and power gating */
+	void (*begin_use)(struct amdgpu_ring *ring);
+	void (*end_use)(struct amdgpu_ring *ring);
+	void (*emit_switch_buffer)(struct amdgpu_ring *ring);
+	void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
+};
+
+struct amdgpu_ring {
+	struct amdgpu_device		*adev;
+	const struct amdgpu_ring_funcs	*funcs;
+	struct amdgpu_fence_driver	fence_drv;
+	struct amd_gpu_scheduler	sched;
+
+	struct amdgpu_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr_offs;
+	unsigned		wptr;
+	unsigned		wptr_old;
+	unsigned		ring_size;
+	unsigned		max_dw;
+	int			count_dw;
+	uint64_t		gpu_addr;
+	uint32_t		ptr_mask;
+	bool			ready;
+	u32			idx;
+	u32			me;
+	u32			pipe;
+	u32			queue;
+	struct amdgpu_bo	*mqd_obj;
+	u32			doorbell_index;
+	bool			use_doorbell;
+	unsigned		wptr_offs;
+	unsigned		fence_offs;
+	uint64_t		current_ctx;
+	char			name[16];
+	unsigned		cond_exe_offs;
+	u64			cond_exe_gpu_addr;
+	volatile u32		*cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry *ent;
+#endif
+};
+
+int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
+void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+void amdgpu_ring_commit(struct amdgpu_ring *ring);
+void amdgpu_ring_undo(struct amdgpu_ring *ring);
+int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+		     unsigned ring_size, struct amdgpu_irq_src *irq_src,
+		     unsigned irq_type);
+void amdgpu_ring_fini(struct amdgpu_ring *ring);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
new file mode 100644
index 0000000000000000000000000000000000000000..405f379ac186ff5a745bdfbbf2432ef1d3d41cab
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_SYNC_H__
+#define __AMDGPU_SYNC_H__
+
+#include <linux/hashtable.h>
+
+struct fence;
+struct reservation_object;
+struct amdgpu_device;
+struct amdgpu_ring;
+
+/*
+ * Container for fences used to sync command submissions.
+ */
+struct amdgpu_sync {
+	DECLARE_HASHTABLE(fences, 4);
+	struct fence	*last_vm_update;
+};
+
+void amdgpu_sync_create(struct amdgpu_sync *sync);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct fence *f);
+int amdgpu_sync_resv(struct amdgpu_device *adev,
+		     struct amdgpu_sync *sync,
+		     struct reservation_object *resv,
+		     void *owner);
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+				     struct amdgpu_ring *ring);
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dcaf691f56b5577352d2238bdfd27e2f1aca2386..f1a206df982356b2315c61aad9ba52c406f49654 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -51,16 +51,6 @@
 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
 
-static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
-{
-	struct amdgpu_mman *mman;
-	struct amdgpu_device *adev;
-
-	mman = container_of(bdev, struct amdgpu_mman, bdev);
-	adev = container_of(mman, struct amdgpu_device, mman);
-	return adev;
-}
-
 
 /*
  * Global memory.
@@ -150,7 +140,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 {
 	struct amdgpu_device *adev;
 
-	adev = amdgpu_get_adev(bdev);
+	adev = amdgpu_ttm_adev(bdev);
 
 	switch (type) {
 	case TTM_PL_SYSTEM:
@@ -168,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
-		man->func = &ttm_bo_manager_func;
+		man->func = &amdgpu_vram_mgr_func;
 		man->gpu_offset = adev->mc.vram_start;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -195,6 +185,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
 	static struct ttm_place placements = {
 		.fpfn = 0,
@@ -213,7 +204,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	abo = container_of(bo, struct amdgpu_bo, tbo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+		if (adev->mman.buffer_funcs_ring->ready == false) {
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 		} else {
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
@@ -229,7 +220,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 				 * allocating address space for the BO.
 				 */
 				abo->placements[i].lpfn =
-					abo->adev->mc.gtt_size >> PAGE_SHIFT;
+					adev->mc.gtt_size >> PAGE_SHIFT;
 			}
 		}
 		break;
@@ -260,64 +251,116 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
 	new_mem->mm_node = NULL;
 }
 
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
-			bool evict, bool no_wait_gpu,
-			struct ttm_mem_reg *new_mem,
-			struct ttm_mem_reg *old_mem)
+static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+			       struct drm_mm_node *mm_node,
+			       struct ttm_mem_reg *mem,
+			       uint64_t *addr)
 {
-	struct amdgpu_device *adev;
-	struct amdgpu_ring *ring;
-	uint64_t old_start, new_start;
-	struct fence *fence;
 	int r;
 
-	adev = amdgpu_get_adev(bo->bdev);
-	ring = adev->mman.buffer_funcs_ring;
-
-	switch (old_mem->mem_type) {
+	switch (mem->mem_type) {
 	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo, old_mem);
+		r = amdgpu_ttm_bind(bo, mem);
 		if (r)
 			return r;
 
 	case TTM_PL_VRAM:
-		old_start = (u64)old_mem->start << PAGE_SHIFT;
-		old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
+		*addr = mm_node->start << PAGE_SHIFT;
+		*addr += bo->bdev->man[mem->mem_type].gpu_offset;
 		break;
 	default:
-		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+		DRM_ERROR("Unknown placement %d\n", mem->mem_type);
 		return -EINVAL;
 	}
-	switch (new_mem->mem_type) {
-	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo, new_mem);
-		if (r)
-			return r;
 
-	case TTM_PL_VRAM:
-		new_start = (u64)new_mem->start << PAGE_SHIFT;
-		new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
-		break;
-	default:
-		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
-		return -EINVAL;
-	}
+	return 0;
+}
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+			    bool evict, bool no_wait_gpu,
+			    struct ttm_mem_reg *new_mem,
+			    struct ttm_mem_reg *old_mem)
+{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+	struct drm_mm_node *old_mm, *new_mm;
+	uint64_t old_start, old_size, new_start, new_size;
+	unsigned long num_pages;
+	struct fence *fence = NULL;
+	int r;
+
+	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+
 	if (!ring->ready) {
 		DRM_ERROR("Trying to move memory with ring turned off.\n");
 		return -EINVAL;
 	}
 
-	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+	old_mm = old_mem->mm_node;
+	r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
+	if (r)
+		return r;
+	old_size = old_mm->size;
 
-	r = amdgpu_copy_buffer(ring, old_start, new_start,
-			       new_mem->num_pages * PAGE_SIZE, /* bytes */
-			       bo->resv, &fence, false);
+	new_mm = new_mem->mm_node;
+	r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
 	if (r)
 		return r;
+	new_size = new_mm->size;
+
+	num_pages = new_mem->num_pages;
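+	/* copy in chunks limited by the smaller of the two current nodes */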
+	while (num_pages) {
+		unsigned long cur_pages = min(old_size, new_size);
+		struct fence *next;
+
+		r = amdgpu_copy_buffer(ring, old_start, new_start,
+				       cur_pages * PAGE_SIZE,
+				       bo->resv, &next, false);
+		if (r)
+			goto error;
+
+		fence_put(fence);
+		fence = next;
+
+		num_pages -= cur_pages;
+		if (!num_pages)
+			break;
+
+		old_size -= cur_pages;
+		if (!old_size) {
+			r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
+						&old_start);
+			if (r)
+				goto error;
+			old_size = old_mm->size;
+		} else {
+			old_start += cur_pages * PAGE_SIZE;
+		}
+
+		new_size -= cur_pages;
+		if (!new_size) {
+			r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
+						&new_start);
+			if (r)
+				goto error;
+
+			new_size = new_mm->size;
+		} else {
+			new_start += cur_pages * PAGE_SIZE;
+		}
+	}
 
 	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
 	fence_put(fence);
 	return r;
+
+error:
+	if (fence)
+		fence_wait(fence, false);
+	fence_put(fence);
+	return r;
 }
 
 static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
@@ -332,7 +375,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
 	struct ttm_placement placement;
 	int r;
 
-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
 	placement.num_placement = 1;
@@ -379,7 +422,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
 	struct ttm_place placements;
 	int r;
 
-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
 	placement.num_placement = 1;
@@ -422,7 +465,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 	if (WARN_ON_ONCE(abo->pin_count > 0))
 		return -EINVAL;
 
-	adev = amdgpu_get_adev(bo->bdev);
+	adev = amdgpu_ttm_adev(bo->bdev);
 
 	/* remember the eviction */
 	if (evict)
@@ -475,7 +518,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	struct amdgpu_device *adev = amdgpu_get_adev(bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 
 	mem->bus.addr = NULL;
 	mem->bus.offset = 0;
@@ -607,7 +650,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 /* prepare the sg table with the user pages */
 static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	unsigned nents;
 	int r;
@@ -639,7 +682,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 
 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	struct sg_page_iter sg_iter;
 
@@ -799,7 +842,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
 	struct amdgpu_device *adev;
 	struct amdgpu_ttm_tt *gtt;
 
-	adev = amdgpu_get_adev(bdev);
+	adev = amdgpu_ttm_adev(bdev);
 
 	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
 	if (gtt == NULL) {
@@ -843,7 +886,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 		return 0;
 	}
 
-	adev = amdgpu_get_adev(ttm->bdev);
+	adev = amdgpu_ttm_adev(ttm->bdev);
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
@@ -889,7 +932,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	if (slave)
 		return;
 
-	adev = amdgpu_get_adev(ttm->bdev);
+	adev = amdgpu_ttm_adev(ttm->bdev);
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
@@ -1012,7 +1055,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 
 static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	unsigned i, j;
 
 	for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
@@ -1029,7 +1072,7 @@ static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
 
 static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
 	unsigned log2_size = min(ilog2(tbo->num_pages),
 				 AMDGPU_TTM_LRU_SIZE - 1);
 
@@ -1060,12 +1103,38 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
 	return res;
 }
 
+static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+					    const struct ttm_place *place)
+{
+	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	    bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
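+		/* evicting only helps if a node intersects the requested range */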
+		unsigned long num_pages = bo->mem.num_pages;
+		struct drm_mm_node *node = bo->mem.mm_node;
+
+		/* Check each drm MM node individually */
+		while (num_pages) {
+			if (place->fpfn < (node->start + node->size) &&
+			    !(place->lpfn && place->lpfn <= node->start))
+				return true;
+
+			num_pages -= node->size;
+			++node;
+		}
+
+		return false;
+	}
+
+	return ttm_bo_eviction_valuable(bo, place);
+}
+
 static struct ttm_bo_driver amdgpu_bo_driver = {
 	.ttm_tt_create = &amdgpu_ttm_tt_create,
 	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
 	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
 	.invalidate_caches = &amdgpu_invalidate_caches,
 	.init_mem_type = &amdgpu_init_mem_type,
+	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
 	.evict_flags = &amdgpu_evict_flags,
 	.move = &amdgpu_bo_move,
 	.verify_access = &amdgpu_verify_access,
@@ -1119,7 +1188,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 
 	r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &adev->stollen_vga_memory);
 	if (r) {
 		return r;
@@ -1317,7 +1387,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		struct reservation_object *resv,
 		struct fence **fence)
 {
-	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_job *job;
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 9812c805326c383cc02bdbc537f056e2d98bd40c..d1c00c04782fce066aad96fa296c91e6fc0541b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -66,6 +66,7 @@ struct amdgpu_mman {
 };
 
 extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
+extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
 
 int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *tbo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index cb3d252f3c78cb03b30252e4b558ccd5c1b6b866..0f0b38191fac899f90592c107fca8eee826b5448 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -228,6 +228,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
 	ucode->mc_addr = mc_addr;
 	ucode->kaddr = kptr;
 
+	if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
+		return 0;
+
 	header = (const struct common_firmware_header *)ucode->fw->data;
 	memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
 		le32_to_cpu(header->ucode_array_offset_bytes)),
@@ -236,6 +239,31 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
 	return 0;
 }
 
+static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
+				uint64_t mc_addr, void *kptr)
+{
+	const struct gfx_firmware_header_v1_0 *header = NULL;
+	const struct common_firmware_header *comm_hdr = NULL;
+	uint8_t *src_addr = NULL;
+	uint8_t *dst_addr = NULL;
+
+	if (!ucode->fw)
+		return 0;
+
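+	/* the JT is copied into the page-aligned area behind the ucode image */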
+	comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
+	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+	dst_addr = ucode->kaddr +
+			   ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
+			   PAGE_SIZE);
+	src_addr = (uint8_t *)ucode->fw->data +
+			   le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
+			   (le32_to_cpu(header->jt_offset) * 4);
+	memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
+
+	return 0;
+}
+
 int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 {
 	struct amdgpu_bo **bo = &adev->firmware.fw_buf;
@@ -247,7 +275,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 	const struct common_firmware_header *header = NULL;
 
 	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
-			       AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+				amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+				0, NULL, NULL, bo);
 	if (err) {
 		dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
 		goto failed;
@@ -259,7 +288,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 		goto failed_reserve;
 	}
 
-	err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
+	err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+				&fw_mc_addr);
 	if (err) {
 		dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
 		goto failed_pin;
@@ -279,6 +309,14 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 			header = (const struct common_firmware_header *)ucode->fw->data;
 			amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset,
 						    fw_buf_ptr + fw_offset);
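+			/* leave room for the MEC jump table behind the ucode */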
+			if (i == AMDGPU_UCODE_ID_CP_MEC1) {
+				const struct gfx_firmware_header_v1_0 *cp_hdr;
+				cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+				amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
+						    fw_buf_ptr + fw_offset);
+				fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+			}
 			fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 		}
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index e468be4e28fa372bd3284a61183096c8b78e260b..a8a4230729f952074219dd5db3f025d609577ca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -130,6 +130,7 @@ enum AMDGPU_UCODE_ID {
 	AMDGPU_UCODE_ID_CP_MEC1,
 	AMDGPU_UCODE_ID_CP_MEC2,
 	AMDGPU_UCODE_ID_RLC_G,
+	AMDGPU_UCODE_ID_STORAGE,
 	AMDGPU_UCODE_ID_MAXIMUM,
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e3281cacc586020843886593531e5f160b7952fc..1b54cc218b4732f46106806d1615f1d17a1abacf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -876,6 +876,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
 	int r;
 
+	parser->job->vm = NULL;
+	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
 	if (ib->length_dw % 16) {
 		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
 			  ib->length_dw);
@@ -931,7 +934,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (r)
 		return r;
 
-	if (!bo->adev->uvd.address_64_bit) {
+	if (!ring->adev->uvd.address_64_bit) {
 		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
 		amdgpu_uvd_force_into_uvd_segment(bo);
 	}
@@ -1002,7 +1005,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &bo);
 	if (r)
 		return r;
@@ -1051,7 +1055,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &bo);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 7fe8fd884f0600a83d98c6230f8a7292395b1f1e..3d6f86cd028f6b87b829ee6c6ff824b9253dfeb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -157,7 +157,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 
 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &adev->vce.vcpu_bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
@@ -641,6 +642,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 	uint32_t *size = &tmp;
 	int i, r, idx = 0;
 
+	p->job->vm = NULL;
+	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+
 	r = amdgpu_cs_sysvm_access_required(p);
 	if (r)
 		return r;
@@ -787,6 +791,96 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 	return r;
 }
 
+/**
+ * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
+ *
+ * @p: parser context
+ * @ib_idx: index of the indirect buffer to parse
+ */
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+{
+	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+	int session_idx = -1;
+	uint32_t destroyed = 0;
+	uint32_t created = 0;
+	uint32_t allocated = 0;
+	uint32_t tmp, handle = 0;
+	int i, r = 0, idx = 0;
+
+	while (idx < ib->length_dw) {
+		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
+		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+
+		if ((len < 8) || (len & 3)) {
+			DRM_ERROR("invalid VCE command length (%d)!\n", len);
+			r = -EINVAL;
+			goto out;
+		}
+
+		switch (cmd) {
+		case 0x00000001: /* session */
+			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+			session_idx = amdgpu_vce_validate_handle(p, handle,
+								 &allocated);
+			if (session_idx < 0) {
+				r = session_idx;
+				goto out;
+			}
+			break;
+
+		case 0x01000001: /* create */
+			created |= 1 << session_idx;
+			if (destroyed & (1 << session_idx)) {
+				destroyed &= ~(1 << session_idx);
+				allocated |= 1 << session_idx;
+
+			} else if (!(allocated & (1 << session_idx))) {
+				DRM_ERROR("Handle already in use!\n");
+				r = -EINVAL;
+				goto out;
+			}
+
+			break;
+
+		case 0x02000001: /* destroy */
+			destroyed |= 1 << session_idx;
+			break;
+
+		default:
+			break;
+		}
+
+		if (session_idx == -1) {
+			DRM_ERROR("no session command at start of IB\n");
+			r = -EINVAL;
+			goto out;
+		}
+
+		idx += len / 4;
+	}
+
+	if (allocated & ~created) {
+		DRM_ERROR("New session without create command!\n");
+		r = -ENOENT;
+	}
+
+out:
+	if (!r) {
+		/* No error, free all destroyed handle slots */
+		tmp = destroyed;
+		amdgpu_ib_free(p->adev, ib, NULL);
+	} else {
+		/* Error during parsing, free all allocated handle slots */
+		tmp = allocated;
+	}
+
+	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+		if (tmp & (1 << i))
+			atomic_set(&p->adev->vce.handles[i], 0);
+
+	return r;
+}
+
 /**
  * amdgpu_vce_ring_emit_ib - execute indirect buffer
  *
@@ -823,18 +917,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 	amdgpu_ring_write(ring, VCE_CMD_END);
 }
 
-unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		4; /* amdgpu_vce_ring_emit_ib */
-}
-
-unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6; /* amdgpu_vce_ring_emit_fence  x1 no user fence */
-}
-
 /**
  * amdgpu_vce_ring_test_ring - test if VCE ring is working
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 12729d2852df42e3c4706bb9b5246085b13549c8..44d49b576513076a278a54586063c35267792588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -34,6 +34,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 			       bool direct, struct fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
 			     unsigned vm_id, bool ctx_switch);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 06f24322e7c31bcfbb9dc867909ebe234968accb..ded57dd538e2661a410676ce9175542e4e76f9c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -116,38 +116,43 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 }
 
 /**
- * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
+ * amdgpu_vm_validate_pt_bos - validate the page table BOs
  *
  * @adev: amdgpu device pointer
  * @vm: vm providing the BOs
- * @duplicates: head of duplicates list
+ * @validate: callback to do the validation
+ * @param: parameter for the validation callback
  *
- * Add the page directory to the BO duplicates list
- * for command submission.
+ * Validate the page table BOs on command submission if necessary.
  */
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct list_head *duplicates)
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			      int (*validate)(void *p, struct amdgpu_bo *bo),
+			      void *param)
 {
 	uint64_t num_evictions;
 	unsigned i;
+	int r;
 
 	/* We only need to validate the page tables
 	 * if they aren't already valid.
 	 */
 	num_evictions = atomic64_read(&adev->num_evictions);
 	if (num_evictions == vm->last_eviction_counter)
-		return;
+		return 0;
 
 	/* add the vm page table to the list */
 	for (i = 0; i <= vm->max_pde_used; ++i) {
-		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 
-		if (!entry->robj)
+		if (!bo)
 			continue;
 
-		list_add(&entry->tv.head, duplicates);
+		r = validate(param, bo);
+		if (r)
+			return r;
 	}
 
+	return 0;
 }
 
 /**
@@ -166,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 
 	spin_lock(&glob->lru_lock);
 	for (i = 0; i <= vm->max_pde_used; ++i) {
-		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+		struct amdgpu_bo *bo = vm->page_tables[i].bo;
 
-		if (!entry->robj)
+		if (!bo)
 			continue;
 
-		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
+		ttm_bo_move_to_lru_tail(&bo->tbo);
 	}
 	spin_unlock(&glob->lru_lock);
 }
@@ -341,9 +346,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	const struct amdgpu_ip_block_version *ip_block;
+	const struct amdgpu_ip_block *ip_block;
 
-	if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
 		/* only compute rings */
 		return false;
 
@@ -351,10 +356,10 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
 	if (!ip_block)
 		return false;
 
-	if (ip_block->major <= 7) {
+	if (ip_block->version->major <= 7) {
 		/* gfx7 has no workaround */
 		return true;
-	} else if (ip_block->major == 8) {
+	} else if (ip_block->version->major == 8) {
 		if (adev->gfx.mec_fw_version >= 673)
 			/* gfx8 is fixed in MEC firmware 673 */
 			return false;
@@ -612,16 +617,26 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 	return result;
 }
 
-static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
-					 struct amdgpu_vm *vm,
-					 bool shadow)
+/*
+ * amdgpu_vm_update_page_directory - make sure that page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm)
 {
+	struct amdgpu_bo *shadow;
 	struct amdgpu_ring *ring;
-	struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
-		vm->page_directory;
-	uint64_t pd_addr;
+	uint64_t pd_addr, shadow_addr;
 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
-	uint64_t last_pde = ~0, last_pt = ~0;
+	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
 	unsigned count = 0, pt_idx, ndw;
 	struct amdgpu_job *job;
 	struct amdgpu_pte_update_params params;
@@ -629,15 +644,8 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 
 	int r;
 
-	if (!pd)
-		return 0;
-
-	r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
-	if (r)
-		return r;
-
-	pd_addr = amdgpu_bo_gpu_offset(pd);
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	shadow = vm->page_directory->shadow;
 
 	/* padding, etc. */
 	ndw = 64;
@@ -645,6 +653,17 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 	/* assume the worst case */
 	ndw += vm->max_pde_used * 6;
 
+	pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+	if (shadow) {
+		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+		if (r)
+			return r;
+		shadow_addr = amdgpu_bo_gpu_offset(shadow);
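+		/* every PDE update below is issued twice, once for the
+		 * shadow and once for the real page directory
+		 */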
+		ndw *= 2;
+	} else {
+		shadow_addr = 0;
+	}
+
 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
 	if (r)
 		return r;
@@ -655,30 +674,26 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
-		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
+		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
 		uint64_t pde, pt;
 
 		if (bo == NULL)
 			continue;
 
 		if (bo->shadow) {
-			struct amdgpu_bo *shadow = bo->shadow;
+			struct amdgpu_bo *pt_shadow = bo->shadow;
 
-			r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+			r = amdgpu_ttm_bind(&pt_shadow->tbo,
+					    &pt_shadow->tbo.mem);
 			if (r)
 				return r;
 		}
 
 		pt = amdgpu_bo_gpu_offset(bo);
-		if (!shadow) {
-			if (vm->page_tables[pt_idx].addr == pt)
-				continue;
-			vm->page_tables[pt_idx].addr = pt;
-		} else {
-			if (vm->page_tables[pt_idx].shadow_addr == pt)
-				continue;
-			vm->page_tables[pt_idx].shadow_addr = pt;
-		}
+		if (vm->page_tables[pt_idx].addr == pt)
+			continue;
+
+		vm->page_tables[pt_idx].addr = pt;
 
 		pde = pd_addr + pt_idx * 8;
 		if (((last_pde + 8 * count) != pde) ||
@@ -686,6 +701,13 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
 
 			if (count) {
+				if (shadow)
+					amdgpu_vm_do_set_ptes(&params,
+							      last_shadow,
+							      last_pt, count,
+							      incr,
+							      AMDGPU_PTE_VALID);
+
 				amdgpu_vm_do_set_ptes(&params, last_pde,
 						      last_pt, count, incr,
 						      AMDGPU_PTE_VALID);
@@ -693,34 +715,44 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 
 			count = 1;
 			last_pde = pde;
+			last_shadow = shadow_addr + pt_idx * 8;
 			last_pt = pt;
 		} else {
 			++count;
 		}
 	}
 
-	if (count)
+	if (count) {
+		if (vm->page_directory->shadow)
+			amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
+					      count, incr, AMDGPU_PTE_VALID);
+
 		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
 				      count, incr, AMDGPU_PTE_VALID);
+	}
 
-	if (params.ib->length_dw != 0) {
-		amdgpu_ring_pad_ib(ring, params.ib);
-		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
+	if (params.ib->length_dw == 0) {
+		amdgpu_job_free(job);
+		return 0;
+	}
+
+	amdgpu_ring_pad_ib(ring, params.ib);
+	amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+			 AMDGPU_FENCE_OWNER_VM);
+	if (shadow)
+		amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
 				 AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(params.ib->length_dw > ndw);
-		r = amdgpu_job_submit(job, ring, &vm->entity,
-				      AMDGPU_FENCE_OWNER_VM, &fence);
-		if (r)
-			goto error_free;
 
-		amdgpu_bo_fence(pd, fence, true);
-		fence_put(vm->page_directory_fence);
-		vm->page_directory_fence = fence_get(fence);
-		fence_put(fence);
+	WARN_ON(params.ib->length_dw > ndw);
+	r = amdgpu_job_submit(job, ring, &vm->entity,
+			      AMDGPU_FENCE_OWNER_VM, &fence);
+	if (r)
+		goto error_free;
 
-	} else {
-		amdgpu_job_free(job);
-	}
+	amdgpu_bo_fence(vm->page_directory, fence, true);
+	fence_put(vm->page_directory_fence);
+	vm->page_directory_fence = fence_get(fence);
+	fence_put(fence);
 
 	return 0;
 
@@ -729,29 +761,6 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 	return r;
 }
 
-/*
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-                                   struct amdgpu_vm *vm)
-{
-	int r;
-
-	r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
-	if (r)
-		return r;
-	return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
-}
-
 /**
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
@@ -781,11 +790,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	/* initialize the variables */
 	addr = start;
 	pt_idx = addr >> amdgpu_vm_block_size;
-	pt = vm->page_tables[pt_idx].entry.robj;
+	pt = vm->page_tables[pt_idx].bo;
 	if (params->shadow) {
 		if (!pt->shadow)
 			return;
-		pt = vm->page_tables[pt_idx].entry.robj->shadow;
+		pt = pt->shadow;
 	}
 	if ((addr & ~mask) == (end & ~mask))
 		nptes = end - addr;
@@ -804,11 +813,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	/* walk over the address space and update the page tables */
 	while (addr < end) {
 		pt_idx = addr >> amdgpu_vm_block_size;
-		pt = vm->page_tables[pt_idx].entry.robj;
+		pt = vm->page_tables[pt_idx].bo;
 		if (params->shadow) {
 			if (!pt->shadow)
 				return;
-			pt = vm->page_tables[pt_idx].entry.robj->shadow;
+			pt = pt->shadow;
 		}
 
 		if ((addr & ~mask) == (end & ~mask))
@@ -1065,8 +1074,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
- * @addr: addr to set the area to
  * @flags: HW flags for the mapping
+ * @nodes: array of drm_mm_nodes with the MC addresses
  * @fence: optional resulting fence
  *
  * Split the mapping into smaller chunks so that each update fits
@@ -1079,12 +1088,11 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
-				      uint32_t flags, uint64_t addr,
+				      uint32_t flags,
+				      struct drm_mm_node *nodes,
 				      struct fence **fence)
 {
-	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
-
-	uint64_t src = 0, start = mapping->it.start;
+	uint64_t pfn, src = 0, start = mapping->it.start;
 	int r;
 
 	/* normally, bo_va->flags only contains READABLE and WRITEABLE bits go here
@@ -1097,23 +1105,40 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 
 	trace_amdgpu_vm_bo_update(mapping);
 
-	if (pages_addr) {
-		if (flags == gtt_flags)
-			src = adev->gart.table_addr + (addr >> 12) * 8;
-		addr = 0;
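+	/* skip ahead to the drm_mm node containing the first page of the
+	 * mapping, e.g. with node sizes 256+256 an offset of 300 pages
+	 * lands in the second node with pfn == 44
+	 */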
+	pfn = mapping->offset >> PAGE_SHIFT;
+	if (nodes) {
+		while (pfn >= nodes->size) {
+			pfn -= nodes->size;
+			++nodes;
+		}
 	}
-	addr += mapping->offset;
 
-	if (!pages_addr || src)
-		return amdgpu_vm_bo_update_mapping(adev, exclusive,
-						   src, pages_addr, vm,
-						   start, mapping->it.last,
-						   flags, addr, fence);
+	do {
+		uint64_t max_entries;
+		uint64_t addr, last;
+
+		if (nodes) {
+			addr = nodes->start << PAGE_SHIFT;
+			max_entries = (nodes->size - pfn) *
+				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+		} else {
+			addr = 0;
+			max_entries = S64_MAX;
+		}
 
-	while (start != mapping->it.last + 1) {
-		uint64_t last;
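+		/* GTT PTEs are either copied straight out of the GART table
+		 * or, when the flags differ, built from the DMA addresses in
+		 * chunks of at most 16K entries
+		 */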
+		if (pages_addr) {
+			if (flags == gtt_flags)
+				src = adev->gart.table_addr +
+					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
+			else
+				max_entries = min(max_entries, 16ull * 1024ull);
+			addr = 0;
+		} else if (flags & AMDGPU_PTE_VALID) {
+			addr += adev->vm_manager.vram_base_offset;
+		}
+		addr += pfn << PAGE_SHIFT;
 
-		last = min((uint64_t)mapping->it.last, start + max_size - 1);
+		last = min((uint64_t)mapping->it.last, start + max_entries - 1);
 		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
 						src, pages_addr, vm,
 						start, last, flags, addr,
@@ -1121,9 +1146,14 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		if (r)
 			return r;
 
+		pfn += last - start + 1;
+		if (nodes && nodes->size == pfn) {
+			pfn = 0;
+			++nodes;
+		}
 		start = last + 1;
-		addr += max_size * AMDGPU_GPU_PAGE_SIZE;
-	}
+
+	} while (unlikely(start != mapping->it.last + 1));
 
 	return 0;
 }
@@ -1147,40 +1177,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	dma_addr_t *pages_addr = NULL;
 	uint32_t gtt_flags, flags;
 	struct ttm_mem_reg *mem;
+	struct drm_mm_node *nodes;
 	struct fence *exclusive;
-	uint64_t addr;
 	int r;
 
 	if (clear) {
 		mem = NULL;
-		addr = 0;
+		nodes = NULL;
 		exclusive = NULL;
 	} else {
 		struct ttm_dma_tt *ttm;
 
 		mem = &bo_va->bo->tbo.mem;
-		addr = (u64)mem->start << PAGE_SHIFT;
-		switch (mem->mem_type) {
-		case TTM_PL_TT:
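+		/* the backing store is described by a chain of drm_mm nodes,
+		 * for GTT placements the DMA addresses are needed as well
+		 */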
+		nodes = mem->mm_node;
+		if (mem->mem_type == TTM_PL_TT) {
 			ttm = container_of(bo_va->bo->tbo.ttm, struct
 					   ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
-			break;
-
-		case TTM_PL_VRAM:
-			addr += adev->vm_manager.vram_base_offset;
-			break;
-
-		default:
-			break;
 		}
-
 		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
 	}
 
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
 	gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
-		adev == bo_va->bo->adev) ? flags : 0;
+		adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
 
 	spin_lock(&vm->status_lock);
 	if (!list_empty(&bo_va->vm_status))
@@ -1190,7 +1210,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
 					       gtt_flags, pages_addr, vm,
-					       mapping, flags, addr,
+					       mapping, flags, nodes,
 					       &bo_va->last_pt_update);
 		if (r)
 			return r;
@@ -1405,18 +1425,17 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
 		struct reservation_object *resv = vm->page_directory->tbo.resv;
-		struct amdgpu_bo_list_entry *entry;
 		struct amdgpu_bo *pt;
 
-		entry = &vm->page_tables[pt_idx].entry;
-		if (entry->robj)
+		if (vm->page_tables[pt_idx].bo)
 			continue;
 
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-				     AMDGPU_GEM_CREATE_SHADOW,
+				     AMDGPU_GEM_CREATE_SHADOW |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 				     NULL, resv, &pt);
 		if (r)
 			goto error_free;
@@ -1442,11 +1461,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 			}
 		}
 
-		entry->robj = pt;
-		entry->priority = 0;
-		entry->tv.bo = &entry->robj->tbo;
-		entry->tv.shared = true;
-		entry->user_pages = NULL;
+		vm->page_tables[pt_idx].bo = pt;
 		vm->page_tables[pt_idx].addr = 0;
 	}
 
@@ -1626,7 +1641,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	r = amdgpu_bo_create(adev, pd_size, align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-			     AMDGPU_GEM_CREATE_SHADOW,
+			     AMDGPU_GEM_CREATE_SHADOW |
+			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 			     NULL, NULL, &vm->page_directory);
 	if (r)
 		goto error_free_sched_entity;
@@ -1697,7 +1713,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	}
 
 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
-		struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+		struct amdgpu_bo *pt = vm->page_tables[i].bo;
 
 		if (!pt)
 			continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
new file mode 100644
index 0000000000000000000000000000000000000000..42a629b560955efe2692b7452403c09bdb0ab3b8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_VM_H__
+#define __AMDGPU_VM_H__
+
+#include <linux/rbtree.h>
+
+#include "gpu_scheduler.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+
+struct amdgpu_bo_va;
+struct amdgpu_job;
+struct amdgpu_bo_list_entry;
+
+/*
+ * GPUVM handling
+ */
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VM	16
+
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF
+
+/* number of entries in page table */
+#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+
+/* PTBs (Page Table Blocks) need to be aligned to 32K */
+#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
+
+/* LOG2 number of contiguous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+
+#define AMDGPU_PTE_VALID	(1 << 0)
+#define AMDGPU_PTE_SYSTEM	(1 << 1)
+#define AMDGPU_PTE_SNOOPED	(1 << 2)
+
+/* VI only */
+#define AMDGPU_PTE_EXECUTABLE	(1 << 4)
+
+#define AMDGPU_PTE_READABLE	(1 << 5)
+#define AMDGPU_PTE_WRITEABLE	(1 << 6)
+
+#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1f) << 7)
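+/* e.g. a fragment value of 4 stands for 2^4 = 16 contiguous 4KB pages,
+ * i.e. a 64KB fragment
+ */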
+
+/* How to program VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER	0
+#define AMDGPU_VM_FAULT_STOP_FIRST	1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
+
+struct amdgpu_vm_pt {
+	struct amdgpu_bo	*bo;
+	uint64_t		addr;
+};
+
+struct amdgpu_vm {
+	/* tree of virtual addresses mapped */
+	struct rb_root		va;
+
+	/* protecting invalidated */
+	spinlock_t		status_lock;
+
+	/* BOs moved, but not yet updated in the PT */
+	struct list_head	invalidated;
+
+	/* BOs cleared in the PT because of a move */
+	struct list_head	cleared;
+
+	/* BO mappings freed, but not yet updated in the PT */
+	struct list_head	freed;
+
+	/* contains the page directory */
+	struct amdgpu_bo	*page_directory;
+	unsigned		max_pde_used;
+	struct fence		*page_directory_fence;
+	uint64_t		last_eviction_counter;
+
+	/* array of page tables, one for each page directory entry */
+	struct amdgpu_vm_pt	*page_tables;
+
+	/* for id and flush management per ring */
+	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];
+
+	/* protecting freed */
+	spinlock_t		freed_lock;
+
+	/* Scheduler entity for page table updates */
+	struct amd_sched_entity	entity;
+
+	/* client id */
+	u64                     client_id;
+};
+
+struct amdgpu_vm_id {
+	struct list_head	list;
+	struct fence		*first;
+	struct amdgpu_sync	active;
+	struct fence		*last_flush;
+	atomic64_t		owner;
+
+	uint64_t		pd_gpu_addr;
+	/* last flushed PD/PT update */
+	struct fence		*flushed_updates;
+
+	uint32_t                current_gpu_reset_count;
+
+	uint32_t		gds_base;
+	uint32_t		gds_size;
+	uint32_t		gws_base;
+	uint32_t		gws_size;
+	uint32_t		oa_base;
+	uint32_t		oa_size;
+};
+
+struct amdgpu_vm_manager {
+	/* Handling of VMIDs */
+	struct mutex				lock;
+	unsigned				num_ids;
+	struct list_head			ids_lru;
+	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];
+
+	/* Handling of VM fences */
+	u64					fence_context;
+	unsigned				seqno[AMDGPU_MAX_RINGS];
+
+	uint32_t				max_pfn;
+	/* vram base address for page table entry  */
+	u64					vram_base_offset;
+	/* is vm enabled? */
+	bool					enabled;
+	/* vm pte handling */
+	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
+	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
+	unsigned				vm_pte_num_rings;
+	atomic_t				vm_pte_next_ring;
+	/* client id counter */
+	atomic64_t				client_counter;
+};
+
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+			 struct list_head *validated,
+			 struct amdgpu_bo_list_entry *entry);
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			      int (*validate)(void *p, struct amdgpu_bo *bo),
+			      void *param);
+void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
+				  struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync, struct fence *fence,
+		      struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+			  struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			     struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+			struct amdgpu_bo_va *bo_va,
+			bool clear);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+			     struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+				       struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+				      struct amdgpu_vm *vm,
+				      struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+		     struct amdgpu_bo_va *bo_va,
+		     uint64_t addr, uint64_t offset,
+		     uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+		       struct amdgpu_bo_va *bo_va,
+		       uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+		      struct amdgpu_bo_va *bo_va);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
new file mode 100644
index 0000000000000000000000000000000000000000..180eed7c8bcad6774f7ff3056d4193ff80b1c970
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+struct amdgpu_vram_mgr {
+	struct drm_mm mm;
+	spinlock_t lock;
+};
+
+/**
+ * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
+ *
+ * @man: TTM memory type manager
+ * @p_size: maximum size of VRAM
+ *
+ * Allocate and initialize the VRAM manager.
+ */
+static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
+				unsigned long p_size)
+{
+	struct amdgpu_vram_mgr *mgr;
+
+	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	if (!mgr)
+		return -ENOMEM;
+
+	drm_mm_init(&mgr->mm, 0, p_size);
+	spin_lock_init(&mgr->lock);
+	man->priv = mgr;
+	return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_fini - free and destroy VRAM manager
+ *
+ * @man: TTM memory type manager
+ *
+ * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
+{
+	struct amdgpu_vram_mgr *mgr = man->priv;
+
+	spin_lock(&mgr->lock);
+	if (!drm_mm_clean(&mgr->mm)) {
+		spin_unlock(&mgr->lock);
+		return -EBUSY;
+	}
+
+	drm_mm_takedown(&mgr->mm);
+	spin_unlock(&mgr->lock);
+	kfree(mgr);
+	man->priv = NULL;
+	return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_new - allocate new ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Allocate VRAM for the given BO.
+ */
+static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+			       struct ttm_buffer_object *tbo,
+			       const struct ttm_place *place,
+			       struct ttm_mem_reg *mem)
+{
+	struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
+	struct amdgpu_vram_mgr *mgr = man->priv;
+	struct drm_mm *mm = &mgr->mm;
+	struct drm_mm_node *nodes;
+	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
+	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
+	unsigned i;
+	int r;
+
+	lpfn = place->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+
+	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
+	    amdgpu_vram_page_split == -1) {
+		pages_per_node = ~0ul;
+		num_nodes = 1;
+	} else {
+		pages_per_node = max((uint32_t)amdgpu_vram_page_split,
+				     mem->page_alignment);
+		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+	}
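+	/* e.g. a 2500 page request with a page split of 1024 ends up
+	 * backed by three nodes of 1024, 1024 and 452 pages
+	 */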
+
+	nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+	if (!nodes)
+		return -ENOMEM;
+
+	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
+		sflags = DRM_MM_SEARCH_BELOW;
+		aflags = DRM_MM_CREATE_TOP;
+	}
+
+	pages_left = mem->num_pages;
+
+	spin_lock(&mgr->lock);
+	for (i = 0; i < num_nodes; ++i) {
+		unsigned long pages = min(pages_left, pages_per_node);
+		uint32_t alignment = mem->page_alignment;
+
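+		/* full sized nodes are aligned to the split size, only the
+		 * tail node falls back to a best fit search
+		 */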
+		if (pages == pages_per_node)
+			alignment = pages_per_node;
+		else
+			sflags |= DRM_MM_SEARCH_BEST;
+
+		r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
+							alignment, 0,
+							place->fpfn, lpfn,
+							sflags, aflags);
+		if (unlikely(r))
+			goto error;
+
+		pages_left -= pages;
+	}
+	spin_unlock(&mgr->lock);
+
+	mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
+	mem->mm_node = nodes;
+
+	return 0;
+
+error:
+	while (i--)
+		drm_mm_remove_node(&nodes[i]);
+	spin_unlock(&mgr->lock);
+
+	kfree(nodes);
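+	/* -ENOSPC only means the range is exhausted, returning success
+	 * without setting mem->mm_node lets TTM try eviction or the next
+	 * placement instead
+	 */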
+	return r == -ENOSPC ? 0 : r;
+}
+
+/**
+ * amdgpu_vram_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @mem: TTM memory object
+ *
+ * Free the allocated VRAM again.
+ */
+static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
+				struct ttm_mem_reg *mem)
+{
+	struct amdgpu_vram_mgr *mgr = man->priv;
+	struct drm_mm_node *nodes = mem->mm_node;
+	unsigned pages = mem->num_pages;
+
+	if (!mem->mm_node)
+		return;
+
+	spin_lock(&mgr->lock);
+	while (pages) {
+		pages -= nodes->size;
+		drm_mm_remove_node(nodes);
+		++nodes;
+	}
+	spin_unlock(&mgr->lock);
+
+	kfree(mem->mm_node);
+	mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_vram_mgr_debug - dump VRAM table
+ *
+ * @man: TTM memory type manager
+ * @prefix: text prefix
+ *
+ * Dump the table content using printk.
+ */
+static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
+				  const char *prefix)
+{
+	struct amdgpu_vram_mgr *mgr = man->priv;
+
+	spin_lock(&mgr->lock);
+	drm_mm_debug_table(&mgr->mm, prefix);
+	spin_unlock(&mgr->lock);
+}
+
+const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
+	.init = amdgpu_vram_mgr_init,
+	.takedown = amdgpu_vram_mgr_fini,
+	.get_node = amdgpu_vram_mgr_new,
+	.put_node = amdgpu_vram_mgr_del,
+	.debug = amdgpu_vram_mgr_debug
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index f7d236f95e74262dd6970050de140c1b01f0b017..8c9bc75a9c2db63288f2c6765b02f68f63875194 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -31,6 +31,7 @@
 #include "atom.h"
 #include "atom-bits.h"
 #include "atombios_encoders.h"
+#include "atombios_crtc.h"
 #include "amdgpu_atombios.h"
 #include "amdgpu_pll.h"
 #include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 1d8c375a3561c9f872a4d87c0b435048f5a4b10b..e9b1964d4e61901cf66c8bd1d23118b2d6a46fc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -887,9 +887,6 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
 
-	if (pi->uvd_power_gated == gate)
-		return;
-
 	pi->uvd_power_gated = gate;
 
 	ci_update_uvd_dpm(adev, gate);
@@ -960,6 +957,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
 		sclk = ps->performance_levels[0].sclk;
 	}
 
+	if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
+		sclk = adev->pm.pm_display_cfg.min_core_set_clock;
+
+	if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
+		mclk = adev->pm.pm_display_cfg.min_mem_set_clock;
+
 	if (rps->vce_active) {
 		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
 			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
@@ -2201,6 +2204,11 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
 	struct ci_power_info *pi = ci_get_pi(adev);
 	int i, ret;
 
+	if (amdgpu_ci_is_smc_running(adev)) {
+		DRM_INFO("smc is running, no need to load smc firmware\n");
+		return 0;
+	}
+
 	for (i = 0; i < adev->usec_timeout; i++) {
 		if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
 			break;
@@ -4190,8 +4198,15 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
 	u32 tmp;
+	int ret = 0;
 
 	if (!gate) {
+		/* turn the clocks on when decoding */
+		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						    AMD_CG_STATE_UNGATE);
+		if (ret)
+			return ret;
+
 		if (pi->caps_uvd_dpm ||
 		    (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
 			pi->smc_state_table.UvdBootLevel = 0;
@@ -4203,9 +4218,17 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
 		tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
 		tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
 		WREG32_SMC(ixDPM_TABLE_475, tmp);
+		ret = ci_enable_uvd_dpm(adev, true);
+	} else {
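+		/* turn the clocks off when not decoding */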
+		ret = ci_enable_uvd_dpm(adev, false);
+		if (ret)
+			return ret;
+
+		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						    AMD_CG_STATE_GATE);
 	}
 
-	return ci_enable_uvd_dpm(adev, !gate);
+	return ret;
 }
 
 static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
@@ -4247,13 +4270,12 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
 
 			ret = ci_enable_vce_dpm(adev, true);
 		} else {
+			ret = ci_enable_vce_dpm(adev, false);
+			if (ret)
+				return ret;
 			/* turn the clocks off when not encoding */
 			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
 							    AMD_CG_STATE_GATE);
-			if (ret)
-				return ret;
-
-			ret = ci_enable_vce_dpm(adev, false);
 		}
 	}
 	return ret;
@@ -5219,6 +5241,7 @@ static void ci_update_current_ps(struct amdgpu_device *adev,
 	pi->current_rps = *rps;
 	pi->current_ps = *new_ps;
 	pi->current_rps.ps_priv = &pi->current_ps;
+	adev->pm.dpm.current_ps = &pi->current_rps;
 }
 
 static void ci_update_requested_ps(struct amdgpu_device *adev,
@@ -5230,6 +5253,7 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
 	pi->requested_rps = *rps;
 	pi->requested_ps = *new_ps;
 	pi->requested_rps.ps_priv = &pi->requested_ps;
+	adev->pm.dpm.requested_ps = &pi->requested_rps;
 }
 
 static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
@@ -5267,8 +5291,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev)
 	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
 	int ret;
 
-	if (amdgpu_ci_is_smc_running(adev))
-		return -EINVAL;
 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
 		ci_enable_voltage_control(adev);
 		ret = ci_construct_voltage_tables(adev);
@@ -5689,7 +5711,7 @@ static int ci_parse_power_table(struct amdgpu_device *adev)
 	adev->pm.dpm.num_ps = state_array->ucNumEntries;
 
 	/* fill in the vce power states */
-	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 		u32 sclk, mclk;
 		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
 		clock_info = (union pplib_clock_info *)
@@ -6094,6 +6116,56 @@ static void ci_dpm_print_power_state(struct amdgpu_device *adev,
 	amdgpu_dpm_print_ps_status(adev, rps);
 }
 
+static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
+						const struct ci_pl *ci_cpl2)
+{
+	return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
+		  (ci_cpl1->sclk == ci_cpl2->sclk) &&
+		  (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
+		  (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
+}
+
+static int ci_check_state_equal(struct amdgpu_device *adev,
+				struct amdgpu_ps *cps,
+				struct amdgpu_ps *rps,
+				bool *equal)
+{
+	struct ci_ps *ci_cps;
+	struct ci_ps *ci_rps;
+	int i;
+
+	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+		return -EINVAL;
+
+	ci_cps = ci_get_ps(cps);
+	ci_rps = ci_get_ps(rps);
+
+	if (ci_cps == NULL) {
+		*equal = false;
+		return 0;
+	}
+
+	if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
+		*equal = false;
+		return 0;
+	}
+
+	for (i = 0; i < ci_cps->performance_level_count; i++) {
+		if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
+					&(ci_rps->performance_levels[i]))) {
+			*equal = false;
+			return 0;
+		}
+	}
+
+	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+	return 0;
+}
+
 static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
@@ -6287,12 +6359,19 @@ static int ci_dpm_suspend(void *handle)
 
 	if (adev->pm.dpm_enabled) {
 		mutex_lock(&adev->pm.mutex);
-		/* disable dpm */
-		ci_dpm_disable(adev);
-		/* reset the power state */
-		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
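+		/* instead of disabling DPM completely, switch to the boot
+		 * state and mask the thermal interrupts while suspended
+		 */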
+		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+		adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
+		adev->pm.dpm.last_state = adev->pm.dpm.state;
+		adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
+		adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
 		mutex_unlock(&adev->pm.mutex);
+		amdgpu_pm_compute_clocks(adev);
 	}
 	return 0;
 }
 
@@ -6310,6 +6389,8 @@ static int ci_dpm_resume(void *handle)
 			adev->pm.dpm_enabled = false;
 		else
 			adev->pm.dpm_enabled = true;
+		adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
+		adev->pm.dpm.state = adev->pm.dpm.last_state;
 		mutex_unlock(&adev->pm.mutex);
 		if (adev->pm.dpm_enabled)
 			amdgpu_pm_compute_clocks(adev);
@@ -6644,6 +6725,8 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
 	.set_sclk_od = ci_dpm_set_sclk_od,
 	.get_mclk_od = ci_dpm_get_mclk_od,
 	.set_mclk_od = ci_dpm_set_mclk_od,
+	.check_state_equal = ci_check_state_equal,
+	.get_vce_clock_state = amdgpu_get_vce_clock_state,
 };
 
 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -6662,3 +6745,12 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
 	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
 	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version ci_dpm_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 7,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &ci_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index a845b6a93b79bed19cd8027f867ca65460cd27bf..302df85893ab599bce0d539c398b6b99a0b75695 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1189,18 +1189,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 	return r;
 }
 
-static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
-	u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
-	if (hung)
-		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-	else
-		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
-	WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
 /**
  * cik_asic_reset - soft reset GPU
  *
@@ -1213,11 +1201,12 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
 static int cik_asic_reset(struct amdgpu_device *adev)
 {
 	int r;
-	cik_set_bios_scratch_engine_hung(adev, true);
+
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 
 	r = cik_gpu_pci_config_reset(adev);
 
-	cik_set_bios_scratch_engine_hung(adev, false);
+	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
 
 	return r;
 }
@@ -1641,745 +1630,6 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
 		adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
 }
 
-static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &dce_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 5,
-		.rev = 0,
-		.funcs = &dce_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 5,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &dce_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &dce_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &dce_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 8,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &gfx_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cik_sdma_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 4,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &uvd_v4_2_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v2_0_ip_funcs,
-	},
-};
-
-int cik_set_ip_blocks(struct amdgpu_device *adev)
-{
-	if (adev->enable_virtual_display) {
-		switch (adev->asic_type) {
-		case CHIP_BONAIRE:
-			adev->ip_blocks = bonaire_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
-			break;
-		case CHIP_HAWAII:
-			adev->ip_blocks = hawaii_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
-			break;
-		case CHIP_KAVERI:
-			adev->ip_blocks = kaveri_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
-			break;
-		case CHIP_KABINI:
-			adev->ip_blocks = kabini_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
-			break;
-		case CHIP_MULLINS:
-			adev->ip_blocks = mullins_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
-			break;
-		default:
-			/* FIXME: not supported yet */
-			return -EINVAL;
-		}
-	} else {
-		switch (adev->asic_type) {
-		case CHIP_BONAIRE:
-			adev->ip_blocks = bonaire_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
-			break;
-		case CHIP_HAWAII:
-			adev->ip_blocks = hawaii_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
-			break;
-		case CHIP_KAVERI:
-			adev->ip_blocks = kaveri_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
-			break;
-		case CHIP_KABINI:
-			adev->ip_blocks = kabini_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
-			break;
-		case CHIP_MULLINS:
-			adev->ip_blocks = mullins_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
-			break;
-		default:
-			/* FIXME: not supported yet */
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
 static const struct amdgpu_asic_funcs cik_asic_funcs =
 {
 	.read_disabled_bios = &cik_read_disabled_bios,
@@ -2612,7 +1862,7 @@ static int cik_common_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs cik_common_ip_funcs = {
+static const struct amd_ip_funcs cik_common_ip_funcs = {
 	.name = "cik_common",
 	.early_init = cik_common_early_init,
 	.late_init = NULL,
@@ -2628,3 +1878,79 @@ const struct amd_ip_funcs cik_common_ip_funcs = {
 	.set_clockgating_state = cik_common_set_clockgating_state,
 	.set_powergating_state = cik_common_set_powergating_state,
 };
+
+static const struct amdgpu_ip_block_version cik_common_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_COMMON,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &cik_common_ip_funcs,
+};
+
+int cik_set_ip_blocks(struct amdgpu_device *adev)
+{
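+	/* the IP blocks are added in initialization order, which matters */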
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+		amdgpu_ip_block_add(adev, &cik_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+		break;
+	case CHIP_HAWAII:
+		amdgpu_ip_block_add(adev, &cik_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
+		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+		break;
+	case CHIP_KAVERI:
+		amdgpu_ip_block_add(adev, &cik_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
+		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+		break;
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		amdgpu_ip_block_add(adev, &cik_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
+		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
+		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+		break;
+	default:
+		/* FIXME: not supported yet */
+		return -EINVAL;
+	}
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 5ebd2d7a032772e960399a6fd9cf943a1cc5e7a3..c4989f51ecefab5520137e484f412aa1f3bb82ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -24,8 +24,6 @@
 #ifndef __CIK_H__
 #define __CIK_H__
 
-extern const struct amd_ip_funcs cik_common_ip_funcs;
-
 void cik_srbm_select(struct amdgpu_device *adev,
 		     u32 me, u32 pipe, u32 queue, u32 vmid);
 int cik_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index be3d6f79a864170718f1f995cc3111109623a0d7..319b32cdea84cf9e92f59691df598436b306367e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -413,7 +413,7 @@ static int cik_ih_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs cik_ih_ip_funcs = {
+static const struct amd_ip_funcs cik_ih_ip_funcs = {
 	.name = "cik_ih",
 	.early_init = cik_ih_early_init,
 	.late_init = NULL,
@@ -441,3 +441,12 @@ static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 	if (adev->irq.ih_funcs == NULL)
 		adev->irq.ih_funcs = &cik_ih_funcs;
 }
+
+const struct amdgpu_ip_block_version cik_ih_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_IH,
+	.major = 2,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &cik_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.h b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
index 6b0f375ec24483a41939f116edadd58555146862..1d9ddee2868e9b464a0c4982612232f1ba7d1895 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.h
@@ -24,6 +24,6 @@
 #ifndef __CIK_IH_H__
 #define __CIK_IH_H__
 
-extern const struct amd_ip_funcs cik_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_ih_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index cb952acc71339e31ac613a896fd1c268b0f327dc..c7340b6e17c9d0d8d09c4ea1ad28b4cad089d98c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -206,10 +206,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 
 	for (i = 0; i < count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
-			amdgpu_ring_write(ring, ring->nop |
+			amdgpu_ring_write(ring, ring->funcs->nop |
 					  SDMA_NOP_COUNT(count - 1));
 		else
-			amdgpu_ring_write(ring, ring->nop);
+			amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /**
@@ -848,22 +848,6 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
 
-static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 4; /* cik_sdma_ring_emit_ib */
-}
-
-static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* cik_sdma_ring_emit_hdp_flush */
-		3 + /* cik_sdma_ring_emit_hdp_invalidate */
-		6 + /* cik_sdma_ring_emit_pipeline_sync */
-		12 + /* cik_sdma_ring_emit_vm_flush */
-		9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
 				 bool enable)
 {
@@ -959,11 +943,10 @@ static int cik_sdma_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
-				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
-				     AMDGPU_RING_TYPE_SDMA);
+				     AMDGPU_SDMA_IRQ_TRAP0 :
+				     AMDGPU_SDMA_IRQ_TRAP1);
 		if (r)
 			return r;
 	}
@@ -1207,7 +1190,7 @@ static int cik_sdma_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs cik_sdma_ip_funcs = {
+static const struct amd_ip_funcs cik_sdma_ip_funcs = {
 	.name = "cik_sdma",
 	.early_init = cik_sdma_early_init,
 	.late_init = NULL,
@@ -1225,10 +1208,20 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
 	.get_rptr = cik_sdma_ring_get_rptr,
 	.get_wptr = cik_sdma_ring_get_wptr,
 	.set_wptr = cik_sdma_ring_set_wptr,
-	.parse_cs = NULL,
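+	/* sum of worst-case dword counts emitted around the IBs in one frame */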
+	.emit_frame_size =
+		6 + /* cik_sdma_ring_emit_hdp_flush */
+		3 + /* cik_sdma_ring_emit_hdp_invalidate */
+		6 + /* cik_sdma_ring_emit_pipeline_sync */
+		12 + /* cik_sdma_ring_emit_vm_flush */
+		9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
 	.emit_ib = cik_sdma_ring_emit_ib,
 	.emit_fence = cik_sdma_ring_emit_fence,
 	.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
@@ -1239,8 +1231,6 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 	.test_ib = cik_sdma_ring_test_ib,
 	.insert_nop = cik_sdma_ring_insert_nop,
 	.pad_ib = cik_sdma_ring_pad_ib,
-	.get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
-	.get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1352,3 +1342,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
 	}
 }
+
+const struct amdgpu_ip_block_version cik_sdma_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SDMA,
+	.major = 2,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &cik_sdma_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
index 027727c677b8a6702e0c355722d34aa7e4dcd3b3..a4a8fe01410bf5ed6dc7f5cd5528e036aec955c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h
@@ -24,6 +24,6 @@
 #ifndef __CIK_SDMA_H__
 #define __CIK_SDMA_H__
 
-extern const struct amd_ip_funcs cik_sdma_ip_funcs;
+extern const struct amdgpu_ip_block_version cik_sdma_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 8659852aea9ef05db22c5b5da03125dbc4903195..6cbd913fd12ed6f971bc50440e19132f77b6d190 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -43,6 +43,14 @@
 #define CRTC4_REGISTER_OFFSET                 (0x477c - 0x1b7c)
 #define CRTC5_REGISTER_OFFSET                 (0x4a7c - 0x1b7c)
 
+/* hpd instance offsets */
+#define HPD0_REGISTER_OFFSET                 (0x1807 - 0x1807)
+#define HPD1_REGISTER_OFFSET                 (0x180a - 0x1807)
+#define HPD2_REGISTER_OFFSET                 (0x180d - 0x1807)
+#define HPD3_REGISTER_OFFSET                 (0x1810 - 0x1807)
+#define HPD4_REGISTER_OFFSET                 (0x1813 - 0x1807)
+#define HPD5_REGISTER_OFFSET                 (0x1816 - 0x1807)
+
 #define BONAIRE_GB_ADDR_CONFIG_GOLDEN        0x12010001
 #define HAWAII_GB_ADDR_CONFIG_GOLDEN         0x12011003
 
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 3c082e1437303d3247da620fcd08ba8a031e62e0..352b5fad5a06394ab08db80defaa5d39a2492137 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1250,7 +1250,8 @@ static void cz_update_current_ps(struct amdgpu_device *adev,
 
 	pi->current_ps = *ps;
 	pi->current_rps = *rps;
-	pi->current_rps.ps_priv = ps;
+	pi->current_rps.ps_priv = &pi->current_ps;
+	adev->pm.dpm.current_ps = &pi->current_rps;
 
 }
 
@@ -1262,7 +1263,8 @@ static void cz_update_requested_ps(struct amdgpu_device *adev,
 
 	pi->requested_ps = *ps;
 	pi->requested_rps = *rps;
-	pi->requested_rps.ps_priv = ps;
+	pi->requested_rps.ps_priv = &pi->requested_ps;
+	adev->pm.dpm.requested_ps = &pi->requested_rps;
 
 }
 
@@ -2257,6 +2259,19 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
 	}
 }
 
+static int cz_check_state_equal(struct amdgpu_device *adev,
+				struct amdgpu_ps *cps,
+				struct amdgpu_ps *rps,
+				bool *equal)
+{
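+	/* always report states as different so a requested state is always programmed */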
+	if (equal == NULL)
+		return -EINVAL;
+
+	*equal = false;
+	return 0;
+}
+
 const struct amd_ip_funcs cz_dpm_ip_funcs = {
 	.name = "cz_dpm",
 	.early_init = cz_dpm_early_init,
@@ -2289,6 +2303,7 @@ static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
 	.vblank_too_short = NULL,
 	.powergate_uvd = cz_dpm_powergate_uvd,
 	.powergate_vce = cz_dpm_powergate_vce,
+	.check_state_equal = cz_check_state_equal,
 };
 
 static void cz_dpm_set_funcs(struct amdgpu_device *adev)
@@ -2296,3 +2311,12 @@ static void cz_dpm_set_funcs(struct amdgpu_device *adev)
 	if (NULL == adev->pm.funcs)
 		adev->pm.funcs = &cz_dpm_funcs;
 }
+
+const struct amdgpu_ip_block_version cz_dpm_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 8,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &cz_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 3d23a70b6432fc26a300cf5b61c368ab7cb11733..fe7cbb24da7b835298cdda54f5b187b4dbf306a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -394,7 +394,7 @@ static int cz_ih_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs cz_ih_ip_funcs = {
+static const struct amd_ip_funcs cz_ih_ip_funcs = {
 	.name = "cz_ih",
 	.early_init = cz_ih_early_init,
 	.late_init = NULL,
@@ -423,3 +423,11 @@ static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 		adev->irq.ih_funcs = &cz_ih_funcs;
 }
 
+const struct amdgpu_ip_block_version cz_ih_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_IH,
+	.major = 3,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &cz_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.h b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
index fc4057a2ecb976585fa4a0ac0ced35efd2db5d9d..14be7753221b1fe2e03708c6a036c2b13918c535 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.h
@@ -24,6 +24,6 @@
 #ifndef __CZ_IH_H__
 #define __CZ_IH_H__
 
-extern const struct amd_ip_funcs cz_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version cz_ih_ip_block;
 
 #endif /* __CZ_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 4108c686aa7c20619bcbe33d430dc8cbbc6fcbe7..199d3f7235d6b2699ebebd02733bf02b3bab7fc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -31,6 +31,7 @@
 #include "atombios_encoders.h"
 #include "amdgpu_pll.h"
 #include "amdgpu_connectors.h"
+#include "dce_v10_0.h"
 
 #include "dce/dce_10_0_d.h"
 #include "dce/dce_10_0_sh_mask.h"
@@ -330,33 +331,12 @@ static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
 			       enum amdgpu_hpd_id hpd)
 {
-	int idx;
 	bool connected = false;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		idx = 0;
-		break;
-	case AMDGPU_HPD_2:
-		idx = 1;
-		break;
-	case AMDGPU_HPD_3:
-		idx = 2;
-		break;
-	case AMDGPU_HPD_4:
-		idx = 3;
-		break;
-	case AMDGPU_HPD_5:
-		idx = 4;
-		break;
-	case AMDGPU_HPD_6:
-		idx = 5;
-		break;
-	default:
+	if (hpd >= adev->mode_info.num_hpd)
 		return connected;
-	}
 
-	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
 	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
 		connected = true;
 
@@ -376,37 +356,16 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
 {
 	u32 tmp;
 	bool connected = dce_v10_0_hpd_sense(adev, hpd);
-	int idx;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		idx = 0;
-		break;
-	case AMDGPU_HPD_2:
-		idx = 1;
-		break;
-	case AMDGPU_HPD_3:
-		idx = 2;
-		break;
-	case AMDGPU_HPD_4:
-		idx = 3;
-		break;
-	case AMDGPU_HPD_5:
-		idx = 4;
-		break;
-	case AMDGPU_HPD_6:
-		idx = 5;
-		break;
-	default:
+	if (hpd >= adev->mode_info.num_hpd)
 		return;
-	}
 
-	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
 	if (connected)
 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
 	else
 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
-	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
 /**
@@ -422,33 +381,12 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
 	u32 tmp;
-	int idx;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			idx = 0;
-			break;
-		case AMDGPU_HPD_2:
-			idx = 1;
-			break;
-		case AMDGPU_HPD_3:
-			idx = 2;
-			break;
-		case AMDGPU_HPD_4:
-			idx = 3;
-			break;
-		case AMDGPU_HPD_5:
-			idx = 4;
-			break;
-		case AMDGPU_HPD_6:
-			idx = 5;
-			break;
-		default:
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 			continue;
-		}
 
 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -457,24 +395,24 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 			 * also avoid interrupt storms during dpms.
 			 */
-			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
-			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 			continue;
 		}
 
-		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
-		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
-		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
 				    DC_HPD_CONNECT_INT_DELAY,
 				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
 				    DC_HPD_DISCONNECT_INT_DELAY,
 				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
-		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 		amdgpu_irq_get(adev, &adev->hpd_irq,
@@ -495,37 +433,16 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
 	u32 tmp;
-	int idx;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			idx = 0;
-			break;
-		case AMDGPU_HPD_2:
-			idx = 1;
-			break;
-		case AMDGPU_HPD_3:
-			idx = 2;
-			break;
-		case AMDGPU_HPD_4:
-			idx = 3;
-			break;
-		case AMDGPU_HPD_5:
-			idx = 4;
-			break;
-		case AMDGPU_HPD_6:
-			idx = 5;
-			break;
-		default:
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 			continue;
-		}
 
-		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
-		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		amdgpu_irq_put(adev, &adev->hpd_irq,
 			       amdgpu_connector->hpd.hpd);
@@ -3554,7 +3471,7 @@ static int dce_v10_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs dce_v10_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
 	.name = "dce_v10_0",
 	.early_init = dce_v10_0_early_init,
 	.late_init = NULL,
@@ -3885,3 +3802,21 @@ static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
 	adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version dce_v10_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 10,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &dce_v10_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v10_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 10,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &dce_v10_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
index e3dc04d293e48759e63d4ae65d27d3028fbb0ad3..7a0747789f1d5a91a5458b1f530355f99e16da7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
@@ -24,7 +24,8 @@
 #ifndef __DCE_V10_0_H__
 #define __DCE_V10_0_H__
 
-extern const struct amd_ip_funcs dce_v10_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v10_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v10_1_ip_block;
 
 void dce_v10_0_disable_dce(struct amdgpu_device *adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index f264b8f17ad1b302ebb51f0f37a1805655529470..ecd000e359819f33e599483636dbb34c134abf08 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -31,6 +31,7 @@
 #include "atombios_encoders.h"
 #include "amdgpu_pll.h"
 #include "amdgpu_connectors.h"
+#include "dce_v11_0.h"
 
 #include "dce/dce_11_0_d.h"
 #include "dce/dce_11_0_sh_mask.h"
@@ -346,33 +347,12 @@ static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
 			       enum amdgpu_hpd_id hpd)
 {
-	int idx;
 	bool connected = false;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		idx = 0;
-		break;
-	case AMDGPU_HPD_2:
-		idx = 1;
-		break;
-	case AMDGPU_HPD_3:
-		idx = 2;
-		break;
-	case AMDGPU_HPD_4:
-		idx = 3;
-		break;
-	case AMDGPU_HPD_5:
-		idx = 4;
-		break;
-	case AMDGPU_HPD_6:
-		idx = 5;
-		break;
-	default:
+	if (hpd >= adev->mode_info.num_hpd)
 		return connected;
-	}
 
-	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
+	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
 	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
 		connected = true;
 
@@ -392,37 +372,16 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
 {
 	u32 tmp;
 	bool connected = dce_v11_0_hpd_sense(adev, hpd);
-	int idx;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		idx = 0;
-		break;
-	case AMDGPU_HPD_2:
-		idx = 1;
-		break;
-	case AMDGPU_HPD_3:
-		idx = 2;
-		break;
-	case AMDGPU_HPD_4:
-		idx = 3;
-		break;
-	case AMDGPU_HPD_5:
-		idx = 4;
-		break;
-	case AMDGPU_HPD_6:
-		idx = 5;
-		break;
-	default:
+	if (hpd >= adev->mode_info.num_hpd)
 		return;
-	}
 
-	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
 	if (connected)
 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
 	else
 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
-	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
 /**
@@ -438,33 +397,12 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
 	u32 tmp;
-	int idx;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			idx = 0;
-			break;
-		case AMDGPU_HPD_2:
-			idx = 1;
-			break;
-		case AMDGPU_HPD_3:
-			idx = 2;
-			break;
-		case AMDGPU_HPD_4:
-			idx = 3;
-			break;
-		case AMDGPU_HPD_5:
-			idx = 4;
-			break;
-		case AMDGPU_HPD_6:
-			idx = 5;
-			break;
-		default:
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 			continue;
-		}
 
 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -473,24 +411,24 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 			 * also avoid interrupt storms during dpms.
 			 */
-			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
-			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 			continue;
 		}
 
-		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
-		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
-		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
+		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
 				    DC_HPD_CONNECT_INT_DELAY,
 				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
 				    DC_HPD_DISCONNECT_INT_DELAY,
 				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
-		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
+		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
@@ -510,37 +448,16 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
 	u32 tmp;
-	int idx;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			idx = 0;
-			break;
-		case AMDGPU_HPD_2:
-			idx = 1;
-			break;
-		case AMDGPU_HPD_3:
-			idx = 2;
-			break;
-		case AMDGPU_HPD_4:
-			idx = 3;
-			break;
-		case AMDGPU_HPD_5:
-			idx = 4;
-			break;
-		case AMDGPU_HPD_6:
-			idx = 5;
-			break;
-		default:
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 			continue;
-		}
 
-		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
-		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
@@ -3611,7 +3528,7 @@ static int dce_v11_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs dce_v11_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
 	.name = "dce_v11_0",
 	.early_init = dce_v11_0_early_init,
 	.late_init = NULL,
@@ -3941,3 +3858,21 @@ static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
 	adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version dce_v11_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 11,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &dce_v11_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v11_2_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 11,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &dce_v11_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
index 1f58a65ba2ef5339368400bd3c399db0d2505282..0d878ca3acbab9e34690e9dc611621c133cb0777 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
@@ -24,7 +24,8 @@
 #ifndef __DCE_V11_0_H__
 #define __DCE_V11_0_H__
 
-extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;
 
 void dce_v11_0_disable_dce(struct amdgpu_device *adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b948d6cb139936670228d6a9e805ecd7fbe9b225..44547f951d9251ef6f4a0417301b348bb4ed0dfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -46,6 +46,17 @@ static const u32 crtc_offsets[6] =
 	SI_CRTC5_REGISTER_OFFSET
 };
 
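+/* per-HPD register block offsets, relative to the first HPD instance */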
+static const u32 hpd_offsets[] =
+{
+	DC_HPD1_INT_STATUS - DC_HPD1_INT_STATUS,
+	DC_HPD2_INT_STATUS - DC_HPD1_INT_STATUS,
+	DC_HPD3_INT_STATUS - DC_HPD1_INT_STATUS,
+	DC_HPD4_INT_STATUS - DC_HPD1_INT_STATUS,
+	DC_HPD5_INT_STATUS - DC_HPD1_INT_STATUS,
+	DC_HPD6_INT_STATUS - DC_HPD1_INT_STATUS,
+};
+
 static const uint32_t dig_offsets[] = {
 	SI_CRTC0_REGISTER_OFFSET,
 	SI_CRTC1_REGISTER_OFFSET,
@@ -94,15 +104,6 @@ static const struct {
 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 } };
 
-static const uint32_t hpd_int_control_offsets[6] = {
-	DC_HPD1_INT_CONTROL,
-	DC_HPD2_INT_CONTROL,
-	DC_HPD3_INT_CONTROL,
-	DC_HPD4_INT_CONTROL,
-	DC_HPD5_INT_CONTROL,
-	DC_HPD6_INT_CONTROL,
-};
-
 static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
 				     u32 block_offset, u32 reg)
 {
@@ -257,34 +258,11 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
 {
 	bool connected = false;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
-			connected = true;
-		break;
-	case AMDGPU_HPD_2:
-		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
-			connected = true;
-		break;
-	case AMDGPU_HPD_3:
-		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
-			connected = true;
-		break;
-	case AMDGPU_HPD_4:
-		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
-			connected = true;
-		break;
-	case AMDGPU_HPD_5:
-		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
-			connected = true;
-		break;
-	case AMDGPU_HPD_6:
-		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
-			connected = true;
-		break;
-	default:
-		break;
-	}
+	if (hpd >= adev->mode_info.num_hpd)
+		return connected;
+
+	if (RREG32(DC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPDx_SENSE)
+		connected = true;
 
 	return connected;
 }
@@ -303,58 +281,15 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
 	u32 tmp;
 	bool connected = dce_v6_0_hpd_sense(adev, hpd);
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		tmp = RREG32(DC_HPD1_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPDx_INT_POLARITY;
-		else
-			tmp |= DC_HPDx_INT_POLARITY;
-		WREG32(DC_HPD1_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_2:
-		tmp = RREG32(DC_HPD2_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPDx_INT_POLARITY;
-		else
-			tmp |= DC_HPDx_INT_POLARITY;
-		WREG32(DC_HPD2_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_3:
-		tmp = RREG32(DC_HPD3_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPDx_INT_POLARITY;
-		else
-			tmp |= DC_HPDx_INT_POLARITY;
-		WREG32(DC_HPD3_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_4:
-		tmp = RREG32(DC_HPD4_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPDx_INT_POLARITY;
-		else
-			tmp |= DC_HPDx_INT_POLARITY;
-		WREG32(DC_HPD4_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_5:
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPDx_INT_POLARITY;
-		else
-			tmp |= DC_HPDx_INT_POLARITY;
-		WREG32(DC_HPD5_INT_CONTROL, tmp);
-			break;
-	case AMDGPU_HPD_6:
-		tmp = RREG32(DC_HPD6_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPDx_INT_POLARITY;
-		else
-			tmp |= DC_HPDx_INT_POLARITY;
-		WREG32(DC_HPD6_INT_CONTROL, tmp);
-		break;
-	default:
-		break;
-	}
+	if (hpd >= adev->mode_info.num_hpd)
+		return;
+
+	tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+	if (connected)
+		tmp &= ~DC_HPDx_INT_POLARITY;
+	else
+		tmp |= DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
 /**
@@ -369,34 +304,17 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
-		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+	u32 tmp;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			WREG32(DC_HPD1_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_2:
-			WREG32(DC_HPD2_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_3:
-			WREG32(DC_HPD3_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_4:
-			WREG32(DC_HPD4_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_5:
-			WREG32(DC_HPD5_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_6:
-			WREG32(DC_HPD6_CONTROL, tmp);
-			break;
-		default:
-			break;
-		}
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+			continue;
+
+		tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+		tmp |= DC_HPDx_EN;
+		WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -405,34 +323,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 			 * also avoid interrupt storms during dpms.
 			 */
-			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
-			switch (amdgpu_connector->hpd.hpd) {
-			case AMDGPU_HPD_1:
-				dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_2:
-				dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_3:
-				dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_4:
-				dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_5:
-				dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_6:
-				dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
-				break;
-			default:
-				continue;
-			}
-
-			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
-			dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
-			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+			tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+			tmp &= ~DC_HPDx_INT_EN;
+			WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 			continue;
 		}
 
@@ -454,32 +347,18 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
+	u32 tmp;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			WREG32(DC_HPD1_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_2:
-			WREG32(DC_HPD2_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_3:
-			WREG32(DC_HPD3_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_4:
-			WREG32(DC_HPD4_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_5:
-			WREG32(DC_HPD5_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_6:
-			WREG32(DC_HPD6_CONTROL, 0);
-			break;
-		default:
-			break;
-		}
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+			continue;
+
+		tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+		tmp &= ~DC_HPDx_EN;
+		WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+
 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
 }
@@ -611,12 +490,55 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
 static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
 					  bool render)
 {
-	if (!render) 
+	if (!render)
 		WREG32(R_000300_VGA_RENDER_CONTROL,
 			RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
 
 }
 
+static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
+{
+	int num_crtc = 0;
+
+	switch (adev->asic_type) {
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+		num_crtc = 6;
+		break;
+	case CHIP_OLAND:
+		num_crtc = 2;
+		break;
+	default:
+		num_crtc = 0;
+	}
+	return num_crtc;
+}
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev)
+{
+	/* Disable VGA rendering and any enabled CRTCs if the ASIC has a DCE engine */
+	if (amdgpu_atombios_has_dce_engine_info(adev)) {
+		u32 tmp;
+		int crtc_enabled, i;
+
+		dce_v6_0_set_vga_render_state(adev, false);
+
+		/* Disable the CRTCs */
+		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
+			crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) &
+				EVERGREEN_CRTC_MASTER_EN;
+			if (crtc_enabled) {
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+				tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+		}
+	}
+}
+
 static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
 {
 
@@ -2338,21 +2260,20 @@ static int dce_v6_0_early_init(void *handle)
 	dce_v6_0_set_display_funcs(adev);
 	dce_v6_0_set_irq_funcs(adev);
 
+	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
+
 	switch (adev->asic_type) {
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
-		adev->mode_info.num_crtc = 6;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 6;
 		break;
 	case CHIP_OLAND:
-		adev->mode_info.num_crtc = 2;
 		adev->mode_info.num_hpd = 2;
 		adev->mode_info.num_dig = 2;
 		break;
 	default:
-		/* FIXME: not supported yet */
 		return -EINVAL;
 	}
 
@@ -2588,42 +2509,23 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
 					    unsigned type,
 					    enum amdgpu_interrupt_state state)
 {
-	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+	u32 dc_hpd_int_cntl;
 
-	switch (type) {
-	case AMDGPU_HPD_1:
-		dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_2:
-		dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_3:
-		dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_4:
-		dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_5:
-		dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_6:
-		dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
-		break;
-	default:
+	if (type >= adev->mode_info.num_hpd) {
 		DRM_DEBUG("invalid hdp %d\n", type);
 		return 0;
 	}
 
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
-		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
-		dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
-		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
+		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
+		WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
-		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
-		dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
-		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
+		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
+		WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	default:
 		break;
@@ -2796,7 +2698,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
 			    struct amdgpu_irq_src *source,
 			    struct amdgpu_iv_entry *entry)
 {
-	uint32_t disp_int, mask, int_control, tmp;
+	uint32_t disp_int, mask, tmp;
 	unsigned hpd;
 
 	if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -2807,12 +2709,11 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
 	hpd = entry->src_data;
 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
 	mask = interrupt_status_offsets[hpd].hpd;
-	int_control = hpd_int_control_offsets[hpd];
 
 	if (disp_int & mask) {
-		tmp = RREG32(int_control);
+		tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
-		WREG32(int_control, tmp);
+		WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 		schedule_work(&adev->hotplug_work);
 		DRM_INFO("IH: HPD%d\n", hpd + 1);
 	}
@@ -2833,7 +2734,7 @@ static int dce_v6_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs dce_v6_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
 	.name = "dce_v6_0",
 	.early_init = dce_v6_0_early_init,
 	.late_init = NULL,
@@ -3174,3 +3075,21 @@ static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
 	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version dce_v6_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 6,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &dce_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v6_4_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 6,
+	.minor = 4,
+	.rev = 0,
+	.funcs = &dce_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
index 6a5528105bb652543c1aa02ea2c2fcf08e4e4823..7b546b596de11e0dbfd01cb203afff22c210ad67 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
@@ -24,6 +24,9 @@
 #ifndef __DCE_V6_0_H__
 #define __DCE_V6_0_H__
 
-extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v6_4_ip_block;
+
+void dce_v6_0_disable_dce(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 5966166ec94c886d48035f1492b42ea89a24d354..979aedf4b74d3b3aea5f27b4d98fc765d02a78bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -31,6 +31,7 @@
 #include "atombios_encoders.h"
 #include "amdgpu_pll.h"
 #include "amdgpu_connectors.h"
+#include "dce_v8_0.h"
 
 #include "dce/dce_8_0_d.h"
 #include "dce/dce_8_0_sh_mask.h"
@@ -56,6 +57,17 @@ static const u32 crtc_offsets[6] =
 	CRTC5_REGISTER_OFFSET
 };
 
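+/* per-HPD register block offsets, relative to the first HPD instance */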
+static const u32 hpd_offsets[] =
+{
+	HPD0_REGISTER_OFFSET,
+	HPD1_REGISTER_OFFSET,
+	HPD2_REGISTER_OFFSET,
+	HPD3_REGISTER_OFFSET,
+	HPD4_REGISTER_OFFSET,
+	HPD5_REGISTER_OFFSET
+};
+
 static const uint32_t dig_offsets[] = {
 	CRTC0_REGISTER_OFFSET,
 	CRTC1_REGISTER_OFFSET,
@@ -104,15 +115,6 @@ static const struct {
 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
 } };
 
-static const uint32_t hpd_int_control_offsets[6] = {
-	mmDC_HPD1_INT_CONTROL,
-	mmDC_HPD2_INT_CONTROL,
-	mmDC_HPD3_INT_CONTROL,
-	mmDC_HPD4_INT_CONTROL,
-	mmDC_HPD5_INT_CONTROL,
-	mmDC_HPD6_INT_CONTROL,
-};
-
 static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
 				     u32 block_offset, u32 reg)
 {
@@ -278,34 +280,12 @@ static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
 {
 	bool connected = false;
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_2:
-		if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_3:
-		if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_4:
-		if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_5:
-		if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
-			connected = true;
-		break;
-	case AMDGPU_HPD_6:
-		if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
-			connected = true;
-		break;
-	default:
-		break;
-	}
+	if (hpd >= adev->mode_info.num_hpd)
+		return connected;
+
+	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
+	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+		connected = true;
 
 	return connected;
 }
@@ -324,58 +304,15 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
 	u32 tmp;
 	bool connected = dce_v8_0_hpd_sense(adev, hpd);
 
-	switch (hpd) {
-	case AMDGPU_HPD_1:
-		tmp = RREG32(mmDC_HPD1_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD1_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_2:
-		tmp = RREG32(mmDC_HPD2_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD2_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_3:
-		tmp = RREG32(mmDC_HPD3_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD3_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_4:
-		tmp = RREG32(mmDC_HPD4_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD4_INT_CONTROL, tmp);
-		break;
-	case AMDGPU_HPD_5:
-		tmp = RREG32(mmDC_HPD5_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD5_INT_CONTROL, tmp);
-			break;
-	case AMDGPU_HPD_6:
-		tmp = RREG32(mmDC_HPD6_INT_CONTROL);
-		if (connected)
-			tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
-		else
-			tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
-		WREG32(mmDC_HPD6_INT_CONTROL, tmp);
-		break;
-	default:
-		break;
-	}
+	if (hpd >= adev->mode_info.num_hpd)
+		return;
+
+	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+	if (connected)
+		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+	else
+		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
 /**
@@ -390,35 +327,17 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
-	u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
-		(0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
-		DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+	u32 tmp;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			WREG32(mmDC_HPD1_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_2:
-			WREG32(mmDC_HPD2_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_3:
-			WREG32(mmDC_HPD3_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_4:
-			WREG32(mmDC_HPD4_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_5:
-			WREG32(mmDC_HPD5_CONTROL, tmp);
-			break;
-		case AMDGPU_HPD_6:
-			WREG32(mmDC_HPD6_CONTROL, tmp);
-			break;
-		default:
-			break;
-		}
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+			continue;
+
+		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -427,34 +346,9 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 			 * also avoid interrupt storms during dpms.
 			 */
-			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
-
-			switch (amdgpu_connector->hpd.hpd) {
-			case AMDGPU_HPD_1:
-				dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_2:
-				dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_3:
-				dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_4:
-				dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_5:
-				dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
-				break;
-			case AMDGPU_HPD_6:
-				dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
-				break;
-			default:
-				continue;
-			}
-
-			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
-			dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
-			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 			continue;
 		}
 
@@ -475,32 +369,18 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_connector *connector;
+	u32 tmp;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		switch (amdgpu_connector->hpd.hpd) {
-		case AMDGPU_HPD_1:
-			WREG32(mmDC_HPD1_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_2:
-			WREG32(mmDC_HPD2_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_3:
-			WREG32(mmDC_HPD3_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_4:
-			WREG32(mmDC_HPD4_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_5:
-			WREG32(mmDC_HPD5_CONTROL, 0);
-			break;
-		case AMDGPU_HPD_6:
-			WREG32(mmDC_HPD6_CONTROL, 0);
-			break;
-		default:
-			break;
-		}
+		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
+			continue;
+
+		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+
 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
 }
@@ -3204,42 +3084,23 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
 					    unsigned type,
 					    enum amdgpu_interrupt_state state)
 {
-	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+	u32 dc_hpd_int_cntl;
 
-	switch (type) {
-	case AMDGPU_HPD_1:
-		dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_2:
-		dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_3:
-		dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_4:
-		dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_5:
-		dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
-		break;
-	case AMDGPU_HPD_6:
-		dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
-		break;
-	default:
+	if (type >= adev->mode_info.num_hpd) {
 		DRM_DEBUG("invalid hdp %d\n", type);
 		return 0;
 	}
 
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
-		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
 		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
-		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
-		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
 		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
-		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	default:
 		break;
@@ -3412,7 +3273,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
 			    struct amdgpu_irq_src *source,
 			    struct amdgpu_iv_entry *entry)
 {
-	uint32_t disp_int, mask, int_control, tmp;
+	uint32_t disp_int, mask, tmp;
 	unsigned hpd;
 
 	if (entry->src_data >= adev->mode_info.num_hpd) {
@@ -3423,12 +3284,11 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
 	hpd = entry->src_data;
 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
 	mask = interrupt_status_offsets[hpd].hpd;
-	int_control = hpd_int_control_offsets[hpd];
 
 	if (disp_int & mask) {
-		tmp = RREG32(int_control);
+		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
-		WREG32(int_control, tmp);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 		schedule_work(&adev->hotplug_work);
 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
 	}
@@ -3449,7 +3309,7 @@ static int dce_v8_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs dce_v8_0_ip_funcs = {
+static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
 	.name = "dce_v8_0",
 	.early_init = dce_v8_0_early_init,
 	.late_init = NULL,
@@ -3779,3 +3639,48 @@ static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
 	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version dce_v8_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_2_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_3_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 3,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version dce_v8_5_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 8,
+	.minor = 5,
+	.rev = 0,
+	.funcs = &dce_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
index 7d0770c3a49bd3ee67128819b56d5c5f74cc66fc..13b802dd946af75e234f520bcd09ac4edc93a9aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
@@ -24,7 +24,11 @@
 #ifndef __DCE_V8_0_H__
 #define __DCE_V8_0_H__
 
-extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version dce_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_2_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_3_ip_block;
+extern const struct amdgpu_ip_block_version dce_v8_5_ip_block;
 
 void dce_v8_0_disable_dce(struct amdgpu_device *adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index c2bd9f045532b3f24b6f34004233ec9ed4e70614..cc85676a68d9f78b53ac2281e309c77d35e94eb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -27,6 +27,9 @@
 #include "atom.h"
 #include "amdgpu_pll.h"
 #include "amdgpu_connectors.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "dce_v6_0.h"
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 #include "dce_v8_0.h"
 #endif
@@ -34,11 +37,13 @@
 #include "dce_v11_0.h"
 #include "dce_virtual.h"
 
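+/* vblank period of the virtual display in ns (~60 Hz) */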
+#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+
 static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
 static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
-				  struct amdgpu_irq_src *source,
-				  struct amdgpu_iv_entry *entry);
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+					      int index);
 
 /**
  * dce_virtual_vblank_wait - vblank wait asic callback.
@@ -99,6 +104,14 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
 			      struct amdgpu_mode_mc_save *save)
 {
 	switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+	case CHIP_OLAND:
+		dce_v6_0_disable_dce(adev);
+		break;
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_BONAIRE:
 	case CHIP_HAWAII:
@@ -119,6 +132,9 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
 		dce_v11_0_disable_dce(adev);
 		break;
 	case CHIP_TOPAZ:
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_HAINAN:
+#endif
 		/* no DCE */
 		return;
 	default:
@@ -195,10 +211,9 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
 		amdgpu_crtc->enabled = true;
-		/* Make sure VBLANK and PFLIP interrupts are still enabled */
+		/* Make sure VBLANK interrupts are still enabled */
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
-		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
@@ -264,24 +279,6 @@ static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
 				     const struct drm_display_mode *mode,
 				     struct drm_display_mode *adjusted_mode)
 {
-	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct drm_device *dev = crtc->dev;
-	struct drm_encoder *encoder;
-
-	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc == crtc) {
-			amdgpu_crtc->encoder = encoder;
-			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
-			break;
-		}
-	}
-	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
-		amdgpu_crtc->encoder = NULL;
-		amdgpu_crtc->connector = NULL;
-		return false;
-	}
-
 	return true;
 }
 
@@ -341,6 +338,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
 	amdgpu_crtc->encoder = NULL;
 	amdgpu_crtc->connector = NULL;
+	amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
 
 	return 0;
@@ -350,48 +348,128 @@ static int dce_virtual_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
 	dce_virtual_set_display_funcs(adev);
 	dce_virtual_set_irq_funcs(adev);
 
-	adev->mode_info.num_crtc = 1;
 	adev->mode_info.num_hpd = 1;
 	adev->mode_info.num_dig = 1;
 	return 0;
 }
 
-static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
+static struct drm_encoder *
+dce_virtual_encoder(struct drm_connector *connector)
 {
-	struct amdgpu_i2c_bus_rec ddc_bus;
-	struct amdgpu_router router;
-	struct amdgpu_hpd hpd;
+	int enc_id = connector->encoder_ids[0];
+	struct drm_encoder *encoder;
+	int i;
 
-	/* look up gpio for ddc, hpd */
-	ddc_bus.valid = false;
-	hpd.hpd = AMDGPU_HPD_NONE;
-	/* needed for aux chan transactions */
-	ddc_bus.hpd = hpd.hpd;
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
 
-	memset(&router, 0, sizeof(router));
-	router.ddc_valid = false;
-	router.cd_valid = false;
-	amdgpu_display_add_connector(adev,
-				      0,
-				      ATOM_DEVICE_CRT1_SUPPORT,
-				      DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
-				      CONNECTOR_OBJECT_ID_VIRTUAL,
-				      &hpd,
-				      &router);
+		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+		if (!encoder)
+			continue;
 
-	amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
-							ATOM_DEVICE_CRT1_SUPPORT,
-							0);
+		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+			return encoder;
+	}
 
-	amdgpu_link_encoder_connector(adev->ddev);
+	/* pick the first one */
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
+	return NULL;
+}
+
+static int dce_virtual_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode = NULL;
+	unsigned i;
+	static const struct mode_size {
+		int w;
+		int h;
+	} common_modes[17] = {
+		{ 640,  480},
+		{ 720,  480},
+		{ 800,  600},
+		{ 848,  480},
+		{1024,  768},
+		{1152,  768},
+		{1280,  720},
+		{1280,  800},
+		{1280,  854},
+		{1280,  960},
+		{1280, 1024},
+		{1440,  900},
+		{1400, 1050},
+		{1680, 1050},
+		{1600, 1200},
+		{1920, 1080},
+		{1920, 1200}
+	};
+
+	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+		drm_mode_probed_add(connector, mode);
+	}
 
-	return true;
+	return 0;
+}
+
+static int dce_virtual_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static int
+dce_virtual_dpms(struct drm_connector *connector, int mode)
+{
+	return 0;
 }
 
+static enum drm_connector_status
+dce_virtual_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static int
+dce_virtual_set_property(struct drm_connector *connector,
+			 struct drm_property *property,
+			 uint64_t val)
+{
+	return 0;
+}
+
+static void dce_virtual_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static void dce_virtual_force(struct drm_connector *connector)
+{
+	return;
+}
+
+static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = {
+	.get_modes = dce_virtual_get_modes,
+	.mode_valid = dce_virtual_mode_valid,
+	.best_encoder = dce_virtual_encoder,
+};
+
+static const struct drm_connector_funcs dce_virtual_connector_funcs = {
+	.dpms = dce_virtual_dpms,
+	.detect = dce_virtual_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = dce_virtual_set_property,
+	.destroy = dce_virtual_destroy,
+	.force = dce_virtual_force,
+};
+
 static int dce_virtual_sw_init(void *handle)
 {
 	int r, i;
@@ -420,16 +498,16 @@ static int dce_virtual_sw_init(void *handle)
 	adev->ddev->mode_config.max_width = 16384;
 	adev->ddev->mode_config.max_height = 16384;
 
-	/* allocate crtcs */
+	/* allocate crtcs, encoders, connectors */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 		r = dce_virtual_crtc_init(adev, i);
 		if (r)
 			return r;
+		r = dce_virtual_connector_encoder_init(adev, i);
+		if (r)
+			return r;
 	}
 
-	dce_virtual_get_connector_info(adev);
-	amdgpu_print_display_setup(adev->ddev);
-
 	drm_kms_helper_poll_init(adev->ddev);
 
 	adev->mode_info.mode_config_initialized = true;
@@ -496,7 +574,7 @@ static int dce_virtual_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs dce_virtual_ip_funcs = {
+static const struct amd_ip_funcs dce_virtual_ip_funcs = {
 	.name = "dce_virtual",
 	.early_init = dce_virtual_early_init,
 	.late_init = NULL,
@@ -526,8 +604,8 @@ static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
 
 static void
 dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
-		      struct drm_display_mode *mode,
-		      struct drm_display_mode *adjusted_mode)
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode)
 {
 	return;
 }
@@ -547,10 +625,6 @@ static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
 				    const struct drm_display_mode *mode,
 				    struct drm_display_mode *adjusted_mode)
 {
-
-	/* set the active encoder to connector routing */
-	amdgpu_encoder_set_active_device(encoder);
-
 	return true;
 }
 
@@ -576,45 +650,40 @@ static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
 	.destroy = dce_virtual_encoder_destroy,
 };
 
-static void dce_virtual_encoder_add(struct amdgpu_device *adev,
-				 uint32_t encoder_enum,
-				 uint32_t supported_device,
-				 u16 caps)
+static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
+					      int index)
 {
-	struct drm_device *dev = adev->ddev;
 	struct drm_encoder *encoder;
-	struct amdgpu_encoder *amdgpu_encoder;
-
-	/* see if we already added it */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		amdgpu_encoder = to_amdgpu_encoder(encoder);
-		if (amdgpu_encoder->encoder_enum == encoder_enum) {
-			amdgpu_encoder->devices |= supported_device;
-			return;
-		}
+	struct drm_connector *connector;
+
+	/* add a new encoder */
+	encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
+	if (!encoder)
+		return -ENOMEM;
+	encoder->possible_crtcs = 1 << index;
+	drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
+			 DRM_MODE_ENCODER_VIRTUAL, NULL);
+	drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
 
+	connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
+	if (!connector) {
+		drm_encoder_cleanup(encoder);
+		kfree(encoder);
+		return -ENOMEM;
 	}
 
-	/* add a new one */
-	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
-	if (!amdgpu_encoder)
-		return;
+	/* add a new connector */
+	drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
+			   DRM_MODE_CONNECTOR_VIRTUAL);
+	drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+	drm_connector_register(connector);
 
-	encoder = &amdgpu_encoder->base;
-	encoder->possible_crtcs = 0x1;
-	amdgpu_encoder->enc_priv = NULL;
-	amdgpu_encoder->encoder_enum = encoder_enum;
-	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
-	amdgpu_encoder->devices = supported_device;
-	amdgpu_encoder->rmx_type = RMX_OFF;
-	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
-	amdgpu_encoder->is_ext_encoder = false;
-	amdgpu_encoder->caps = caps;
-
-	drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
-					 DRM_MODE_ENCODER_VIRTUAL, NULL);
-	drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
-	DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
+	/* link them */
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	return 0;
 }
 
 static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
@@ -630,8 +699,8 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
 	.hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
 	.page_flip = &dce_virtual_page_flip,
 	.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
-	.add_encoder = &dce_virtual_encoder_add,
-	.add_connector = &amdgpu_connector_add,
+	.add_encoder = NULL,
+	.add_connector = NULL,
 	.stop_mc_access = &dce_virtual_stop_mc_access,
 	.resume_mc_access = &dce_virtual_resume_mc_access,
 };
@@ -642,107 +711,13 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
 		adev->mode_info.funcs = &dce_virtual_display_funcs;
 }
 
-static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
-{
-	struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer);
-	struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info);
-	unsigned crtc = 0;
-	drm_handle_vblank(adev->ddev, crtc);
-	dce_virtual_pageflip_irq(adev, NULL, NULL);
-	hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
-	return HRTIMER_NORESTART;
-}
-
-static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
-						     int crtc,
-						     enum amdgpu_interrupt_state state)
-{
-	if (crtc >= adev->mode_info.num_crtc) {
-		DRM_DEBUG("invalid crtc %d\n", crtc);
-		return;
-	}
-
-	if (state && !adev->mode_info.vsync_timer_enabled) {
-		DRM_DEBUG("Enable software vsync timer\n");
-		hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-		hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
-		adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
-		hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
-	} else if (!state && adev->mode_info.vsync_timer_enabled) {
-		DRM_DEBUG("Disable software vsync timer\n");
-		hrtimer_cancel(&adev->mode_info.vblank_timer);
-	}
-
-	adev->mode_info.vsync_timer_enabled = state;
-	DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
-}
-
-
-static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
-                                       struct amdgpu_irq_src *source,
-                                       unsigned type,
-                                       enum amdgpu_interrupt_state state)
-{
-	switch (type) {
-	case AMDGPU_CRTC_IRQ_VBLANK1:
-		dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
-		break;
-	default:
-		break;
-	}
-	return 0;
-}
-
-static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
-					  int crtc)
-{
-	if (crtc >= adev->mode_info.num_crtc) {
-		DRM_DEBUG("invalid crtc %d\n", crtc);
-		return;
-	}
-}
-
-static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
-			      struct amdgpu_irq_src *source,
-			      struct amdgpu_iv_entry *entry)
-{
-	unsigned crtc = 0;
-	unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
-
-	dce_virtual_crtc_vblank_int_ack(adev, crtc);
-
-	if (amdgpu_irq_enabled(adev, source, irq_type)) {
-		drm_handle_vblank(adev->ddev, crtc);
-	}
-	dce_virtual_pageflip_irq(adev, NULL, NULL);
-	DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-	return 0;
-}
-
-static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
-					    struct amdgpu_irq_src *src,
-					    unsigned type,
-					    enum amdgpu_interrupt_state state)
-{
-	if (type >= adev->mode_info.num_crtc) {
-		DRM_ERROR("invalid pageflip crtc %d\n", type);
-		return -EINVAL;
-	}
-	DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
-
-	return 0;
-}
-
-static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
-				  struct amdgpu_irq_src *source,
-				  struct amdgpu_iv_entry *entry)
+static int dce_virtual_pageflip(struct amdgpu_device *adev,
+				unsigned crtc_id)
 {
 	unsigned long flags;
-	unsigned crtc_id = 0;
 	struct amdgpu_crtc *amdgpu_crtc;
 	struct amdgpu_flip_work *works;
 
-	crtc_id = 0;
 	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 
 	if (crtc_id >= adev->mode_info.num_crtc) {
@@ -781,22 +756,79 @@ static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
+{
+	struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
+						       struct amdgpu_crtc, vblank_timer);
+	struct drm_device *ddev = amdgpu_crtc->base.dev;
+	struct amdgpu_device *adev = ddev->dev_private;
+
+	drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
+	dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+	hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
+		      HRTIMER_MODE_REL);
+
+	return HRTIMER_NORESTART;
+}
+
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+							int crtc,
+							enum amdgpu_interrupt_state state)
+{
+	if (crtc >= adev->mode_info.num_crtc) {
+		DRM_DEBUG("invalid crtc %d\n", crtc);
+		return;
+	}
+
+	if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+		DRM_DEBUG("Enable software vsync timer\n");
+		hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
+			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
+				    ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+		adev->mode_info.crtcs[crtc]->vblank_timer.function =
+			dce_virtual_vblank_timer_handle;
+		hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
+			      ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+	} else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
+		DRM_DEBUG("Disable software vsync timer\n");
+		hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
+	}
+
+	adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
+	DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
+}
+
+static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
+					  struct amdgpu_irq_src *source,
+					  unsigned type,
+					  enum amdgpu_interrupt_state state)
+{
+	if (type > AMDGPU_CRTC_IRQ_VBLANK6)
+		return -EINVAL;
+
+	dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state);
+
+	return 0;
+}
+
 static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
 	.set = dce_virtual_set_crtc_irq_state,
-	.process = dce_virtual_crtc_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
-	.set = dce_virtual_set_pageflip_irq_state,
-	.process = dce_virtual_pageflip_irq,
+	.process = NULL,
 };
 
 static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
 {
 	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
 	adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
-
-	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
-	adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
 }
 
+const struct amdgpu_ip_block_version dce_virtual_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &dce_virtual_ip_funcs,
+};
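/*
 * A minimal sketch (not part of the patch) of how the new
 * amdgpu_ip_block_version structures are consumed: ASIC setup code
 * registers each block in bring-up order during early init. The
 * amdgpu_ip_block_add() helper is assumed from elsewhere in this
 * series, and the block selection below is illustrative only.
 */
static int virtual_dce_ip_blocks_sketch(struct amdgpu_device *adev)
{
	int r;

	/* memory controller and interrupt handler must come up first */
	r = amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
	if (r)
		return r;
	r = amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
	if (r)
		return r;
	/* the virtual DCE block stands in for a real display engine */
	return amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
}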
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
index e239243f6ebc19e6b79f5575927bb7b7e96cfd22..ed422012c8c65dd88d6f8845e369d0c2ae3926ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
@@ -24,8 +24,7 @@
 #ifndef __DCE_VIRTUAL_H__
 #define __DCE_VIRTUAL_H__
 
-extern const struct amd_ip_funcs dce_virtual_ip_funcs;
-#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
+extern const struct amdgpu_ip_block_version dce_virtual_ip_block;
 
 #endif
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 40abb6b81c095a677dd6f6992369d5db8f2ec4aa..96dd05dca694231d8185579ed6f7adb4b14bb9f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1940,7 +1940,7 @@ static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
 
 static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 	uint32_t seq = ring->fence_drv.sync_seq;
 	uint64_t addr = ring->fence_drv.gpu_addr;
 
@@ -1966,7 +1966,7 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
 	/* write new base address */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -2814,33 +2814,6 @@ static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 	amdgpu_ring_write(ring, 0);
 }
 
-static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		6; /* gfx_v6_0_ring_emit_ib */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		5 + /* gfx_v6_0_ring_emit_hdp_flush */
-		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
-		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
-		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
-		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
-		3; /* gfx_v6_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		5 + /* gfx_v6_0_ring_emit_hdp_flush */
-		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
-		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v6_0_ring_emit_vm_flush */
-		14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v6_0_select_se_sh,
@@ -2896,9 +2869,7 @@ static int gfx_v6_0_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "gfx");
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     0x80000000, 0xf,
-				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
-				     AMDGPU_RING_TYPE_GFX);
+				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
 		if (r)
 			return r;
 	}
@@ -2920,9 +2891,7 @@ static int gfx_v6_0_sw_init(void *handle)
 		sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     0x80000000, 0xf,
-				     &adev->gfx.eop_irq, irq_type,
-				     AMDGPU_RING_TYPE_COMPUTE);
+				     &adev->gfx.eop_irq, irq_type);
 		if (r)
 			return r;
 	}
@@ -3237,7 +3206,7 @@ static int gfx_v6_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
 	.name = "gfx_v6_0",
 	.early_init = gfx_v6_0_early_init,
 	.late_init = NULL,
@@ -3255,10 +3224,20 @@ const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
+	.type = AMDGPU_RING_TYPE_GFX,
+	.align_mask = 0xff,
+	.nop = 0x80000000,
 	.get_rptr = gfx_v6_0_ring_get_rptr,
 	.get_wptr = gfx_v6_0_ring_get_wptr,
 	.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		5 + /* gfx_v6_0_ring_emit_hdp_flush */
+		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+		3, /* gfx_v6_ring_emit_cntxcntl */
+	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
 	.emit_ib = gfx_v6_0_ring_emit_ib,
 	.emit_fence = gfx_v6_0_ring_emit_fence,
 	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3269,15 +3248,22 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
 	.test_ib = gfx_v6_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
-	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+	.type = AMDGPU_RING_TYPE_COMPUTE,
+	.align_mask = 0xff,
+	.nop = 0x80000000,
 	.get_rptr = gfx_v6_0_ring_get_rptr,
 	.get_wptr = gfx_v6_0_ring_get_wptr,
 	.set_wptr = gfx_v6_0_ring_set_wptr_compute,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		5 + /* gfx_v6_0_ring_emit_hdp_flush */
+		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v6_0_ring_emit_vm_flush */
+		14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
 	.emit_ib = gfx_v6_0_ring_emit_ib,
 	.emit_fence = gfx_v6_0_ring_emit_fence,
 	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3287,8 +3273,6 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
 	.test_ring = gfx_v6_0_ring_test_ring,
 	.test_ib = gfx_v6_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
-	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -3360,3 +3344,12 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
 }
+
+const struct amdgpu_ip_block_version gfx_v6_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 6,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gfx_v6_0_ip_funcs,
+};
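/*
 * A minimal sketch (not part of the patch) of why emit_frame_size and
 * emit_ib_size moved into amdgpu_ring_funcs: the submission path can
 * now size its ring allocation from constants instead of calling the
 * removed get_dma_frame_size()/get_emit_ib_size() hooks per ring.
 * amdgpu_ring_alloc() is an existing helper; the function name below
 * is illustrative.
 */
static int ring_alloc_frame_sketch(struct amdgpu_ring *ring, unsigned num_ibs)
{
	/* fixed per-frame overhead plus one IB packet per scheduled IB */
	unsigned ndw = ring->funcs->emit_frame_size +
		       num_ibs * ring->funcs->emit_ib_size;

	return amdgpu_ring_alloc(ring, ndw);
}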
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
index b9657e72b2486ae7b4462468274b464a5b6fe835..ced6fc42f688d5ce66be4c32c00db7affd6391c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
@@ -24,6 +24,6 @@
 #ifndef __GFX_V6_0_H__
 #define __GFX_V6_0_H__
 
-extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v6_0_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 71116da9e782d52597837b2dfa95a17b2007998d..903aa240e946f79b53ac249de83648019542d806 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2077,9 +2077,9 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	u32 ref_and_mask;
-	int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
+	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
 
-	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
 		switch (ring->me) {
 		case 1:
 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -3222,7 +3222,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
  */
 static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 	uint32_t seq = ring->fence_drv.sync_seq;
 	uint64_t addr = ring->fence_drv.gpu_addr;
 
@@ -3262,7 +3262,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3391,7 +3391,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 		if (adev->gfx.rlc.save_restore_obj == NULL) {
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 					     NULL, NULL,
 					     &adev->gfx.rlc.save_restore_obj);
 			if (r) {
@@ -3435,7 +3436,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 		if (adev->gfx.rlc.clear_state_obj == NULL) {
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 					     NULL, NULL,
 					     &adev->gfx.rlc.clear_state_obj);
 			if (r) {
@@ -3475,7 +3477,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 		if (adev->gfx.rlc.cp_table_obj == NULL) {
 			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 					     NULL, NULL,
 					     &adev->gfx.rlc.cp_table_obj);
 			if (r) {
@@ -4354,44 +4357,40 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
-static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
 {
-	return
-		4; /* gfx_v7_0_ring_emit_ib_gfx */
+	WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
+	return RREG32(mmSQ_IND_DATA);
 }
 
-static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
 {
-	return
-		20 + /* gfx_v7_0_ring_emit_gds_switch */
-		7 + /* gfx_v7_0_ring_emit_hdp_flush */
-		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
-		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
-		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
-		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
-		3; /* gfx_v7_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v7_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v7_0_ring_emit_gds_switch */
-		7 + /* gfx_v7_0_ring_emit_hdp_flush */
-		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
-		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v7_0_ring_emit_vm_flush */
-		7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+	/* type 0 wave data */
+	dst[(*no_fields)++] = 0;
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
 }
 
 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v7_0_select_se_sh,
+	.read_wave_data = &gfx_v7_0_read_wave_data,
 };
 
 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
@@ -4643,9 +4642,7 @@ static int gfx_v7_0_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "gfx");
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
-				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
-				     AMDGPU_RING_TYPE_GFX);
+				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
 		if (r)
 			return r;
 	}
@@ -4670,9 +4667,7 @@ static int gfx_v7_0_sw_init(void *handle)
 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
 		/* type-2 packets are deprecated on MEC, use type-3 instead */
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
-				     &adev->gfx.eop_irq, irq_type,
-				     AMDGPU_RING_TYPE_COMPUTE);
+				     &adev->gfx.eop_irq, irq_type);
 		if (r)
 			return r;
 	}
@@ -5123,7 +5118,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
 	.name = "gfx_v7_0",
 	.early_init = gfx_v7_0_early_init,
 	.late_init = gfx_v7_0_late_init,
@@ -5141,10 +5136,21 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
+	.type = AMDGPU_RING_TYPE_GFX,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+		3, /* gfx_v7_ring_emit_cntxcntl */
+	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
 	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5157,15 +5163,23 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
-	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
-	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
+	.type = AMDGPU_RING_TYPE_COMPUTE,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v7_0_ring_emit_vm_flush */
+		7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
 	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5177,8 +5191,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
-	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5289,3 +5301,39 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
 }
+
+const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 7,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 7,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 7,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &gfx_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 7,
+	.minor = 3,
+	.rev = 0,
+	.funcs = &gfx_v7_0_ip_funcs,
+};
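/*
 * A minimal sketch (not part of the patch) of how the new
 * read_wave_data() hook might be driven, e.g. from a debugfs reader.
 * The SE/SH selection protocol and the 32-entry buffer size are
 * assumptions; grbm_idx_mutex serializes the shared index registers.
 */
static int read_wave_sketch(struct amdgpu_device *adev, u32 se, u32 sh,
			    u32 simd, u32 wave, uint32_t fields[32])
{
	int n = 0;

	mutex_lock(&adev->grbm_idx_mutex);
	/* point GRBM at the target shader engine/array before indexing */
	adev->gfx.funcs->select_se_sh(adev, se, sh, 0xffffffff);
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, fields, &n);
	/* restore broadcast mode so later register writes hit all SEs */
	adev->gfx.funcs->select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	return n; /* number of fields captured, 0 if the hook is absent */
}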
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index 94e3ea147c26ebae5e31f81d07b590002f58e79b..2f5164cc0e53b647b01ff16c1cdff66339fbf839 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -24,6 +24,9 @@
 #ifndef __GFX_V7_0_H__
 #define __GFX_V7_0_H__
 
-extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_1_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_2_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v7_3_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ee6a48a092143ae6952bbcce9af1f04dc5712614..1c2544f314c0966cb2f93ee5b3bddfe047147cd2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1058,6 +1058,19 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 		adev->firmware.fw_size +=
 			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
+		/* we also need to account for the JT (jump table) */
+		cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+
+		if (amdgpu_sriov_vf(adev)) {
+			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
+			info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
+			info->fw = adev->gfx.mec_fw;
+			adev->firmware.fw_size +=
+				ALIGN(64 * PAGE_SIZE, PAGE_SIZE);
+		}
+
 		if (adev->gfx.mec2_fw) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
 			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
@@ -1127,34 +1140,8 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
 	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
 			PACKET3_SET_CONTEXT_REG_START);
-	switch (adev->asic_type) {
-	case CHIP_TONGA:
-	case CHIP_POLARIS10:
-		buffer[count++] = cpu_to_le32(0x16000012);
-		buffer[count++] = cpu_to_le32(0x0000002A);
-		break;
-	case CHIP_POLARIS11:
-		buffer[count++] = cpu_to_le32(0x16000012);
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	case CHIP_FIJI:
-		buffer[count++] = cpu_to_le32(0x3a00161a);
-		buffer[count++] = cpu_to_le32(0x0000002e);
-		break;
-	case CHIP_TOPAZ:
-	case CHIP_CARRIZO:
-		buffer[count++] = cpu_to_le32(0x00000002);
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	case CHIP_STONEY:
-		buffer[count++] = cpu_to_le32(0x00000000);
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	default:
-		buffer[count++] = cpu_to_le32(0x00000000);
-		buffer[count++] = cpu_to_le32(0x00000000);
-		break;
-	}
+	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
+	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
 
 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -1273,7 +1260,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 		if (adev->gfx.rlc.clear_state_obj == NULL) {
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 					     NULL, NULL,
 					     &adev->gfx.rlc.clear_state_obj);
 			if (r) {
@@ -1315,7 +1303,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 		if (adev->gfx.rlc.cp_table_obj == NULL) {
 			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
 					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 					     NULL, NULL,
 					     &adev->gfx.rlc.cp_table_obj);
 			if (r) {
@@ -2045,10 +2034,8 @@ static int gfx_v8_0_sw_init(void *handle)
 			ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
 		}
 
-		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
-				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
-				     AMDGPU_RING_TYPE_GFX);
+		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+				     AMDGPU_CP_IRQ_GFX_EOP);
 		if (r)
 			return r;
 	}
@@ -2072,10 +2059,8 @@ static int gfx_v8_0_sw_init(void *handle)
 		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
 		/* type-2 packets are deprecated on MEC, use type-3 instead */
-		r = amdgpu_ring_init(adev, ring, 1024,
-				     PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
-				     &adev->gfx.eop_irq, irq_type,
-				     AMDGPU_RING_TYPE_COMPUTE);
+		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+				     irq_type);
 		if (r)
 			return r;
 	}
@@ -3679,6 +3664,21 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
 							num_rb_pipes);
 	}
 
+	/* cache the values for userspace */
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+			adev->gfx.config.rb_config[i][j].rb_backend_disable =
+				RREG32(mmCC_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+			adev->gfx.config.rb_config[i][j].raster_config =
+				RREG32(mmPA_SC_RASTER_CONFIG);
+			adev->gfx.config.rb_config[i][j].raster_config_1 =
+				RREG32(mmPA_SC_RASTER_CONFIG_1);
+		}
+	}
+	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 	mutex_unlock(&adev->grbm_idx_mutex);
 }
 
@@ -4331,7 +4331,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring;
 	u32 tmp;
 	u32 rb_bufsz;
-	u64 rb_addr, rptr_addr;
+	u64 rb_addr, rptr_addr, wptr_gpu_addr;
 	int r;
 
 	/* Set the write pointer delay */
@@ -4362,6 +4362,9 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
 	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
 
+	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+	WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
+	WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
 	mdelay(1);
 	WREG32(mmCP_RB0_CNTL, tmp);
 
@@ -5438,9 +5441,41 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
+{
+	WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
+	return RREG32(mmSQ_IND_DATA);
+}
+
+static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+{
+	/* type 0 wave data */
+	dst[(*no_fields)++] = 0;
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
+	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+}
+
 static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v8_0_select_se_sh,
+	.read_wave_data = &gfx_v8_0_read_wave_data,
 };
 
 static int gfx_v8_0_early_init(void *handle)
@@ -6120,7 +6155,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	u32 ref_and_mask, reg_mem_engine;
 
-	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
 		switch (ring->me) {
 		case 1:
 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
@@ -6222,7 +6257,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
 
 static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 	uint32_t seq = ring->fence_drv.sync_seq;
 	uint64_t addr = ring->fence_drv.gpu_addr;
 
@@ -6240,11 +6275,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
-
-	/* GFX8 emits 128 dw nop to prevent DE do vm_flush before CE finish CEIB */
-	if (usepfp)
-		amdgpu_ring_insert_nop(ring, 128);
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -6360,42 +6391,6 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 	amdgpu_ring_write(ring, 0);
 }
 
-static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v8_0_ring_emit_ib_gfx */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v8_0_ring_emit_gds_switch */
-		7 + /* gfx_v8_0_ring_emit_hdp_flush */
-		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
-		6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
-		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
-		256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
-		2 + /* gfx_v8_ring_emit_sb */
-		3; /* gfx_v8_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v8_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v8_0_ring_emit_gds_switch */
-		7 + /* gfx_v8_0_ring_emit_hdp_flush */
-		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
-		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v8_0_ring_emit_vm_flush */
-		7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
-}
-
 static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 						 enum amdgpu_interrupt_state state)
 {
@@ -6541,7 +6536,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
-const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 	.name = "gfx_v8_0",
 	.early_init = gfx_v8_0_early_init,
 	.late_init = gfx_v8_0_late_init,
@@ -6562,10 +6557,22 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
+	.type = AMDGPU_RING_TYPE_GFX,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		20 + /* gfx_v8_0_ring_emit_gds_switch */
+		7 + /* gfx_v8_0_ring_emit_hdp_flush */
+		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		6 + 6 + 6 + /* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+		128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+		2 + /* gfx_v8_ring_emit_sb */
+		3, /* gfx_v8_ring_emit_cntxcntl */
+	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
 	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6579,15 +6586,23 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_switch_buffer = gfx_v8_ring_emit_sb,
 	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
-	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
-	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
+	.type = AMDGPU_RING_TYPE_COMPUTE,
+	.align_mask = 0xff,
+	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
 	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		20 + /* gfx_v8_0_ring_emit_gds_switch */
+		7 + /* gfx_v8_0_ring_emit_hdp_flush */
+		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v8_0_ring_emit_vm_flush */
+		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
 	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6599,8 +6614,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
-	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -6753,3 +6766,21 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
 }
+
+const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 8,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gfx_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GFX,
+	.major = 8,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &gfx_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index ebed1f8292979de0055d4c0d943375fdeda77e9d..788cc3ab584bb732961e88a086607ec28ab7c679 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -24,6 +24,7 @@
 #ifndef __GFX_V8_0_H__
 #define __GFX_V8_0_H__
 
-extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gfx_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gfx_v8_1_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index b13c8aaec078e635f2d2d17b8d6332d0e80ae312..1940d36bc3040e4a41f702423a0e175a6f90a428 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -1030,7 +1030,7 @@ static int gmc_v6_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
 	.name = "gmc_v6_0",
 	.early_init = gmc_v6_0_early_init,
 	.late_init = gmc_v6_0_late_init,
@@ -1069,3 +1069,11 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
 }
 
+const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GMC,
+	.major = 6,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gmc_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
index 42c4fc676cd4d6b4a8e1be9d39c31facfef18fa6..ed2f64dec47a0a83a7ecc0c32452031b2b51c581 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
@@ -24,6 +24,6 @@
 #ifndef __GMC_V6_0_H__
 #define __GMC_V6_0_H__
 
-extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v6_0_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index aa0c4b964621cd9726366fba08679394095b75f0..3a25f72980c1e78f395c6cad4fa7b1f7c11d620f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1235,7 +1235,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
 	.name = "gmc_v7_0",
 	.early_init = gmc_v7_0_early_init,
 	.late_init = gmc_v7_0_late_init,
@@ -1273,3 +1273,21 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->mc.vm_fault.num_types = 1;
 	adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GMC,
+	.major = 7,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gmc_v7_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GMC,
+	.major = 7,
+	.minor = 4,
+	.rev = 0,
+	.funcs = &gmc_v7_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
index 0b386b5d2f7aabdc3b2fd89ca625733049fa7a89..ebce2966c1c745216dfee507b0aa27025eabb6b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
@@ -24,6 +24,7 @@
 #ifndef __GMC_V7_0_H__
 #define __GMC_V7_0_H__
 
-extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v7_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v7_4_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c22ef140a54215e5253b7c9605a358a4785f8c05..74d7cc3f7e8c8b3ca6e72cc79987e680d7ed58b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1436,7 +1436,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
+static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 	.name = "gmc_v8_0",
 	.early_init = gmc_v8_0_early_init,
 	.late_init = gmc_v8_0_late_init,
@@ -1477,3 +1477,30 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->mc.vm_fault.num_types = 1;
 	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GMC,
+	.major = 8,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GMC,
+	.major = 8,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &gmc_v8_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_GMC,
+	.major = 8,
+	.minor = 5,
+	.rev = 0,
+	.funcs = &gmc_v8_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
index fc5001a8119dabab06e0e45888c6670b48163e77..19b8a8aed2046904a1018ab2b9b61503de4a8409 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
@@ -24,6 +24,8 @@
 #ifndef __GMC_V8_0_H__
 #define __GMC_V8_0_H__
 
-extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
+extern const struct amdgpu_ip_block_version gmc_v8_0_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_1_ip_block;
+extern const struct amdgpu_ip_block_version gmc_v8_5_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 3b8906ce3511274f302b7f23bbca5dd6d53fb683..ac21bb7bc0f335713fb86e093999870d92bc6eb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -392,7 +392,7 @@ static int iceland_ih_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs iceland_ih_ip_funcs = {
+static const struct amd_ip_funcs iceland_ih_ip_funcs = {
 	.name = "iceland_ih",
 	.early_init = iceland_ih_early_init,
 	.late_init = NULL,
@@ -421,3 +421,11 @@ static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 		adev->irq.ih_funcs = &iceland_ih_funcs;
 }
 
+const struct amdgpu_ip_block_version iceland_ih_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_IH,
+	.major = 2,
+	.minor = 4,
+	.rev = 0,
+	.funcs = &iceland_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
index 57558cddfbcbbc503a9df712e36fdea12ed6a61e..3235f42775486fc8e76f26214bf6044115149421 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h
@@ -24,6 +24,6 @@
 #ifndef __ICELAND_IH_H__
 #define __ICELAND_IH_H__
 
-extern const struct amd_ip_funcs iceland_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version iceland_ih_ip_block;
 
 #endif /* __ICELAND_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index f8618a3881a841a3160115eabd065a9b311f862b..b6f2e50636a93f76ace1d547f202ea4d7a1f9c67 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2796,7 +2796,7 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
 	adev->pm.dpm.num_ps = state_array->ucNumEntries;
 
 	/* fill in the vce power states */
-	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 		u32 sclk;
 		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
 		clock_info = (union pplib_clock_info *)
@@ -3243,6 +3243,18 @@ static int kv_dpm_set_powergating_state(void *handle,
 	return 0;
 }
 
+static int kv_check_state_equal(struct amdgpu_device *adev,
+				struct amdgpu_ps *cps,
+				struct amdgpu_ps *rps,
+				bool *equal)
+{
+	if (equal == NULL)
+		return -EINVAL;
+
+	*equal = false;
+	return 0;
+}
+
 const struct amd_ip_funcs kv_dpm_ip_funcs = {
 	.name = "kv_dpm",
 	.early_init = kv_dpm_early_init,
@@ -3273,6 +3285,8 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
 	.force_performance_level = &kv_dpm_force_performance_level,
 	.powergate_uvd = &kv_dpm_powergate_uvd,
 	.enable_bapm = &kv_dpm_enable_bapm,
+	.get_vce_clock_state = amdgpu_get_vce_clock_state,
+	.check_state_equal = kv_check_state_equal,
 };
 
 static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -3291,3 +3305,12 @@ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
 	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
 	adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version kv_dpm_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 7,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &kv_dpm_ip_funcs,
+};
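/*
 * A minimal sketch (not part of the patch) of the caller side that the
 * new check_state_equal hook implies: the DPM core asks the backend
 * whether the requested power state matches the current one and skips
 * the expensive reprogramming when it does. kv_check_state_equal()
 * always reports false, so KV/KB keeps the old always-switch behavior.
 * The adev->pm.funcs field path is assumed.
 */
static bool dpm_states_equal_sketch(struct amdgpu_device *adev,
				    struct amdgpu_ps *cur,
				    struct amdgpu_ps *req)
{
	bool equal = false;

	if (adev->pm.funcs->check_state_equal)
		adev->pm.funcs->check_state_equal(adev, cur, req, &equal);

	return equal;
}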
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 565dab3c72186704542b3763fbcdcecb9a6e9cf9..03e8856b08ce00c48834f4c17dcf5f3d2c723b78 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -232,10 +232,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 
 	for (i = 0; i < count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
-			amdgpu_ring_write(ring, ring->nop |
+			amdgpu_ring_write(ring, ring->funcs->nop |
 				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
 		else
-			amdgpu_ring_write(ring, ring->nop);
+			amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /**
@@ -902,22 +902,6 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
-static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 6; /* sdma_v2_4_ring_emit_ib */
-}
-
-static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* sdma_v2_4_ring_emit_hdp_flush */
-		3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
-		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
-		12 + /* sdma_v2_4_ring_emit_vm_flush */
-		10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static int sdma_v2_4_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -965,11 +949,10 @@ static int sdma_v2_4_sw_init(void *handle)
 		ring->use_doorbell = false;
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
-				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
-				     AMDGPU_RING_TYPE_SDMA);
+				     AMDGPU_SDMA_IRQ_TRAP0 :
+				     AMDGPU_SDMA_IRQ_TRAP1);
 		if (r)
 			return r;
 	}
@@ -1204,7 +1187,7 @@ static int sdma_v2_4_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
+static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
 	.name = "sdma_v2_4",
 	.early_init = sdma_v2_4_early_init,
 	.late_init = NULL,
@@ -1222,10 +1205,19 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
 	.get_rptr = sdma_v2_4_ring_get_rptr,
 	.get_wptr = sdma_v2_4_ring_get_wptr,
 	.set_wptr = sdma_v2_4_ring_set_wptr,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		6 + /* sdma_v2_4_ring_emit_hdp_flush */
+		3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+		12 + /* sdma_v2_4_ring_emit_vm_flush */
+		10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
 	.emit_ib = sdma_v2_4_ring_emit_ib,
 	.emit_fence = sdma_v2_4_ring_emit_fence,
 	.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
@@ -1236,8 +1228,6 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
 	.test_ib = sdma_v2_4_ring_test_ib,
 	.insert_nop = sdma_v2_4_ring_insert_nop,
 	.pad_ib = sdma_v2_4_ring_pad_ib,
-	.get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
-	.get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
 };
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1350,3 +1340,12 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
 		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
 	}
 }
+
+const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SDMA,
+	.major = 2,
+	.minor = 4,
+	.rev = 0,
+	.funcs = &sdma_v2_4_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
index 07349f5ee10fce982e9f0c1980e7597cc6b0df38..28b4337292164b4346380b11452a69098be46824 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h
@@ -24,6 +24,6 @@
 #ifndef __SDMA_V2_4_H__
 #define __SDMA_V2_4_H__
 
-extern const struct amd_ip_funcs sdma_v2_4_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v2_4_ip_block;
 
 #endif
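
A minimal, compilable sketch (an editor's aside, not part of the patch) of the pattern the sdma_v2_4 hunks above apply to every engine: per-ring constants (ring type, align mask, nop packet) and the statically known dword budgets move out of struct amdgpu_ring and out of the get_emit_ib_size/get_dma_frame_size callbacks into the shared const funcs table, which is why amdgpu_ring_init() loses three parameters. All types below are simplified stand-ins for the real amdgpu structures:

/*
 * Sketch only: simplified stand-ins for struct amdgpu_ring and
 * struct amdgpu_ring_funcs, showing constants-as-data in the table.
 */
#include <stdint.h>
#include <stdio.h>

struct ring;

struct ring_funcs {
	int type;			/* e.g. AMDGPU_RING_TYPE_SDMA */
	uint32_t align_mask;		/* e.g. 0xf */
	uint32_t nop;			/* engine-specific nop packet */
	unsigned emit_frame_size;	/* worst-case dwords per frame */
	unsigned emit_ib_size;		/* dwords per indirect buffer */
	void (*insert_nop)(struct ring *ring, uint32_t count);
};

struct ring {
	const struct ring_funcs *funcs;	/* replaces ring->nop et al. */
};

static void generic_insert_nop(struct ring *ring, uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		printf("write 0x%08x\n", ring->funcs->nop); /* was ring->nop */
}

static const struct ring_funcs sdma_like_funcs = {
	.type = 1,
	.align_mask = 0xf,
	.nop = 0,
	.emit_frame_size = 6 + 3 + 6 + 12 + 10 + 10 + 10,
	.emit_ib_size = 7 + 6,
	.insert_nop = generic_insert_nop,
};

int main(void)
{
	struct ring r = { .funcs = &sdma_like_funcs };

	r.funcs->insert_nop(&r, 2);
	return 0;
}

One const table per engine version now holds each constant exactly once, instead of every ring carrying its own copy, and the dword budgets become plain data the scheduler can read without an indirect call.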
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index a9d10941fb53d9ab2290fd6bdbe3351d91b0acaa..6172d01e985a1fce9f5eb38706ab4666d8b62cf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -392,10 +392,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 
 	for (i = 0; i < count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
-			amdgpu_ring_write(ring, ring->nop |
+			amdgpu_ring_write(ring, ring->funcs->nop |
 				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
 		else
-			amdgpu_ring_write(ring, ring->nop);
+			amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /**
@@ -1104,22 +1104,6 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
-static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 6; /* sdma_v3_0_ring_emit_ib */
-}
-
-static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* sdma_v3_0_ring_emit_hdp_flush */
-		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
-		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
-		12 + /* sdma_v3_0_ring_emit_vm_flush */
-		10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static int sdma_v3_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1177,11 +1161,10 @@ static int sdma_v3_0_sw_init(void *handle)
 
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
-				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
-				     AMDGPU_RING_TYPE_SDMA);
+				     AMDGPU_SDMA_IRQ_TRAP0 :
+				     AMDGPU_SDMA_IRQ_TRAP1);
 		if (r)
 			return r;
 	}
@@ -1544,7 +1527,7 @@ static int sdma_v3_0_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
+static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
 	.name = "sdma_v3_0",
 	.early_init = sdma_v3_0_early_init,
 	.late_init = NULL,
@@ -1565,10 +1548,19 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
 	.get_rptr = sdma_v3_0_ring_get_rptr,
 	.get_wptr = sdma_v3_0_ring_get_wptr,
 	.set_wptr = sdma_v3_0_ring_set_wptr,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		6 + /* sdma_v3_0_ring_emit_hdp_flush */
+		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+		12 + /* sdma_v3_0_ring_emit_vm_flush */
+		10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
 	.emit_ib = sdma_v3_0_ring_emit_ib,
 	.emit_fence = sdma_v3_0_ring_emit_fence,
 	.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
@@ -1579,8 +1571,6 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 	.test_ib = sdma_v3_0_ring_test_ib,
 	.insert_nop = sdma_v3_0_ring_insert_nop,
 	.pad_ib = sdma_v3_0_ring_pad_ib,
-	.get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
 };
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1693,3 +1683,21 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
 	}
 }
+
+const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SDMA,
+	.major = 3,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &sdma_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SDMA,
+	.major = 3,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &sdma_v3_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
index 0cb9698a30544ebb1112dcf5f8499d8fa713e729..7aa223d35f1b1ea990c3d781d6cfbf931e29e10f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h
@@ -24,6 +24,7 @@
 #ifndef __SDMA_V3_0_H__
 #define __SDMA_V3_0_H__
 
-extern const struct amd_ip_funcs sdma_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version sdma_v3_1_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index dc9511c5ecb8ea393cc3097a9507b2e2e959f962..3ed8ad8725b9cf09c965876a482b0699e5704058 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -39,6 +39,7 @@
 #include "si_dma.h"
 #include "dce_v6_0.h"
 #include "si.h"
+#include "dce_virtual.h"
 
 static const u32 tahiti_golden_registers[] =
 {
@@ -905,7 +906,7 @@ static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 }
 
-u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+static u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
 {
 	unsigned long flags;
 	u32 r;
@@ -918,7 +919,7 @@ u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
 	return r;
 }
 
-void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+static void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
 	unsigned long flags;
 
@@ -1811,7 +1812,7 @@ static int si_common_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs si_common_ip_funcs = {
+static const struct amd_ip_funcs si_common_ip_funcs = {
 	.name = "si_common",
 	.early_init = si_common_early_init,
 	.late_init = NULL,
@@ -1828,119 +1829,13 @@ const struct amd_ip_funcs si_common_ip_funcs = {
 	.set_powergating_state = si_common_set_powergating_state,
 };
 
-static const struct amdgpu_ip_block_version verde_ip_blocks[] =
+static const struct amdgpu_ip_block_version si_common_ip_block =
 {
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &dce_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_dma_ip_funcs,
-	},
-/*	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 3,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &si_null_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_null_ip_funcs,
-	},
-	*/
-};
-
-
-static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
-{
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &si_dma_ip_funcs,
-	},
+	.type = AMD_IP_BLOCK_TYPE_COMMON,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &si_common_ip_funcs,
 };
 
 int si_set_ip_blocks(struct amdgpu_device *adev)
@@ -1949,13 +1844,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 	case CHIP_VERDE:
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
+		amdgpu_ip_block_add(adev, &si_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &si_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &si_dma_ip_block);
+		/* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+		/* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
+		break;
 	case CHIP_OLAND:
-		adev->ip_blocks = verde_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
+		amdgpu_ip_block_add(adev, &si_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &si_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v6_4_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &si_dma_ip_block);
+		/* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
+		/* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
 		break;
 	case CHIP_HAINAN:
-		adev->ip_blocks = hainan_ip_blocks;
-		adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
+		amdgpu_ip_block_add(adev, &si_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &si_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &si_dma_ip_block);
 		break;
 	default:
 		BUG();
diff --git a/drivers/gpu/drm/amd/amdgpu/si.h b/drivers/gpu/drm/amd/amdgpu/si.h
index 959d7b63e0e526b6b4809f9c34a6203f4078a2aa..589225080c24c26a115129777a36cf40ecfcc5ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.h
+++ b/drivers/gpu/drm/amd/amdgpu/si.h
@@ -24,8 +24,6 @@
 #ifndef __SI_H__
 #define __SI_H__
 
-extern const struct amd_ip_funcs si_common_ip_funcs;
-
 void si_srbm_select(struct amdgpu_device *adev,
 		     u32 me, u32 pipe, u32 queue, u32 vmid);
 int si_set_ip_blocks(struct amdgpu_device *adev);
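
The same series replaces the static per-ASIC block arrays with runtime registration, which is what lets each chip case above pick dce_virtual_ip_block or the hardware DCE block once adev->enable_virtual_display is known. A hedged sketch (not the kernel's definitions; the struct layout, MAX_IP_BLOCKS bound, and return convention are illustrative assumptions) of the shape amdgpu_ip_block_add() implies:

#include <stdio.h>

#define MAX_IP_BLOCKS 16	/* assumed bound, for the sketch only */

struct ip_block_version { int type, major, minor, rev; };

struct device_ctx {
	const struct ip_block_version *ip_blocks[MAX_IP_BLOCKS];
	int num_ip_blocks;
	int enable_virtual_display;
};

/* Append one block; call order still defines init order, as in the switch. */
static int ip_block_add(struct device_ctx *dev,
			const struct ip_block_version *block)
{
	if (!block || dev->num_ip_blocks >= MAX_IP_BLOCKS)
		return -1;
	dev->ip_blocks[dev->num_ip_blocks++] = block;
	return 0;
}

/* Hypothetical DCE entries, standing in for dce_v6_0 / dce_virtual. */
static const struct ip_block_version dce_hw   = { .type = 3, .major = 6 };
static const struct ip_block_version dce_virt = { .type = 3, .major = 1 };

int main(void)
{
	struct device_ctx dev = { .enable_virtual_display = 1 };

	ip_block_add(&dev, dev.enable_virtual_display ? &dce_virt : &dce_hw);
	printf("%d block(s), dce major %d\n", dev.num_ip_blocks,
	       dev.ip_blocks[0]->major);
	return 0;
}

Registration order still matters exactly as the removed arrays' /* ORDER MATTERS! */ comments warned; the add calls simply spell that order out per chip.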
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index de358193a8f9d07927cb70f45901fd182f0f94e3..14265c5c349ef793794192eb08b34e1969f2e6bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -495,22 +495,6 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
 }
 
-static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 3; /* si_dma_ring_emit_ib */
-}
-
-static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		3 + /* si_dma_ring_emit_hdp_flush */
-		3 + /* si_dma_ring_emit_hdp_invalidate */
-		6 + /* si_dma_ring_emit_pipeline_sync */
-		12 + /* si_dma_ring_emit_vm_flush */
-		9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static int si_dma_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -547,11 +531,10 @@ static int si_dma_sw_init(void *handle)
 		ring->use_doorbell = false;
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
-				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
-				     AMDGPU_RING_TYPE_SDMA);
+				     AMDGPU_SDMA_IRQ_TRAP0 :
+				     AMDGPU_SDMA_IRQ_TRAP1);
 		if (r)
 			return r;
 	}
@@ -762,7 +745,7 @@ static int si_dma_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs si_dma_ip_funcs = {
+static const struct amd_ip_funcs si_dma_ip_funcs = {
 	.name = "si_dma",
 	.early_init = si_dma_early_init,
 	.late_init = NULL,
@@ -780,10 +763,19 @@ const struct amd_ip_funcs si_dma_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
 	.get_rptr = si_dma_ring_get_rptr,
 	.get_wptr = si_dma_ring_get_wptr,
 	.set_wptr = si_dma_ring_set_wptr,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		3 + /* si_dma_ring_emit_hdp_flush */
+		3 + /* si_dma_ring_emit_hdp_invalidate */
+		6 + /* si_dma_ring_emit_pipeline_sync */
+		12 + /* si_dma_ring_emit_vm_flush */
+		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
 	.emit_ib = si_dma_ring_emit_ib,
 	.emit_fence = si_dma_ring_emit_fence,
 	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
@@ -794,8 +786,6 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
 	.test_ib = si_dma_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = si_dma_ring_pad_ib,
-	.get_emit_ib_size = si_dma_ring_get_emit_ib_size,
-	.get_dma_frame_size = si_dma_ring_get_dma_frame_size,
 };
 
 static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
@@ -913,3 +903,12 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
 		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
 	}
 }
+
+const struct amdgpu_ip_block_version si_dma_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SDMA,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &si_dma_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h
index 3a3e0c78a54baaf910df1959f48358cc8aa308e1..5ac1b8452fb4160a92d67b3c0d5ba2c1f303dc72 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h
@@ -24,6 +24,6 @@
 #ifndef __SI_DMA_H__
 #define __SI_DMA_H__
 
-extern const struct amd_ip_funcs si_dma_ip_funcs;
+extern const struct amdgpu_ip_block_version si_dma_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 3de7bca5854b1b06f20077d177c5476511d5e74b..91721339678709385d125d4b4d85defccfe5d61c 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3171,6 +3171,7 @@ static void ni_update_current_ps(struct amdgpu_device *adev,
 	eg_pi->current_rps = *rps;
 	ni_pi->current_ps = *new_ps;
 	eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+	adev->pm.dpm.current_ps = &eg_pi->current_rps;
 }
 
 static void ni_update_requested_ps(struct amdgpu_device *adev,
@@ -3183,6 +3184,7 @@ static void ni_update_requested_ps(struct amdgpu_device *adev,
 	eg_pi->requested_rps = *rps;
 	ni_pi->requested_ps = *new_ps;
 	eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+	adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
 }
 
 static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
@@ -7320,7 +7322,7 @@ static int si_parse_power_table(struct amdgpu_device *adev)
 	adev->pm.dpm.num_ps = state_array->ucNumEntries;
 
 	/* fill in the vce power states */
-	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
 		u32 sclk, mclk;
 		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
 		clock_info = (union pplib_clock_info *)
@@ -7957,6 +7959,57 @@ static int si_dpm_early_init(void *handle)
 	return 0;
 }
 
+static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
+						const struct rv7xx_pl *si_cpl2)
+{
+	return ((si_cpl1->mclk == si_cpl2->mclk) &&
+		  (si_cpl1->sclk == si_cpl2->sclk) &&
+		  (si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
+		  (si_cpl1->vddc == si_cpl2->vddc) &&
+		  (si_cpl1->vddci == si_cpl2->vddci));
+}
+
+static int si_check_state_equal(struct amdgpu_device *adev,
+				struct amdgpu_ps *cps,
+				struct amdgpu_ps *rps,
+				bool *equal)
+{
+	struct si_ps *si_cps;
+	struct si_ps *si_rps;
+	int i;
+
+	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
+		return -EINVAL;
+
+	si_cps = si_get_ps(cps);
+	si_rps = si_get_ps(rps);
+
+	if (si_cps == NULL) {
+		DRM_ERROR("si_cps is NULL\n");
+		*equal = false;
+		return 0;
+	}
+
+	if (si_cps->performance_level_count != si_rps->performance_level_count) {
+		*equal = false;
+		return 0;
+	}
+
+	for (i = 0; i < si_cps->performance_level_count; i++) {
+		if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
+					&(si_rps->performance_levels[i]))) {
+			*equal = false;
+			return 0;
+		}
+	}
+
+	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
+	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
+
+	return 0;
+}
+
 
 const struct amd_ip_funcs si_dpm_ip_funcs = {
 	.name = "si_dpm",
@@ -7991,6 +8044,8 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = {
 	.get_fan_control_mode = &si_dpm_get_fan_control_mode,
 	.set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
 	.get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+	.check_state_equal = &si_check_state_equal,
+	.get_vce_clock_state = amdgpu_get_vce_clock_state,
 };
 
 static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
@@ -8010,3 +8065,11 @@ static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
 	adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
 }
 
+const struct amdgpu_ip_block_version si_dpm_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 6,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &si_dpm_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 8fae3d4a236016bd70819f97336db96540eb7a84..db0f3684666104d54c802fc6b3ce7604f153898b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -268,7 +268,7 @@ static int si_ih_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs si_ih_ip_funcs = {
+static const struct amd_ip_funcs si_ih_ip_funcs = {
 	.name = "si_ih",
 	.early_init = si_ih_early_init,
 	.late_init = NULL,
@@ -297,3 +297,11 @@ static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 		adev->irq.ih_funcs = &si_ih_funcs;
 }
 
+const struct amdgpu_ip_block_version si_ih_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_IH,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &si_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h
index f3e3a954369cae496ef1df4c1e96688fe077157f..42e64a53e24f2ff5c82685a8e8f7616558e6fd7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h
@@ -24,6 +24,6 @@
 #ifndef __SI_IH_H__
 #define __SI_IH_H__
 
-extern const struct amd_ip_funcs si_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version si_ih_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b4ea229bb4498ff1f84209dec7f211198ed788ed..52b71ee587930980b8e9e360b784cc8e8fcf83ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -455,7 +455,7 @@ static int tonga_ih_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs tonga_ih_ip_funcs = {
+static const struct amd_ip_funcs tonga_ih_ip_funcs = {
 	.name = "tonga_ih",
 	.early_init = tonga_ih_early_init,
 	.late_init = NULL,
@@ -487,3 +487,11 @@ static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
 		adev->irq.ih_funcs = &tonga_ih_funcs;
 }
 
+const struct amdgpu_ip_block_version tonga_ih_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_IH,
+	.major = 3,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &tonga_ih_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
index 7392d70fa4a7bed7e6978c0dbc85cc597929669f..499027eee5c2d0800580ed6f4e276900413a3aec 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h
@@ -24,6 +24,6 @@
 #ifndef __TONGA_IH_H__
 #define __TONGA_IH_H__
 
-extern const struct amd_ip_funcs tonga_ih_ip_funcs;
+extern const struct amdgpu_ip_block_version tonga_ih_ip_block;
 
-#endif /* __CZ_IH_H__ */
+#endif /* __TONGA_IH_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f6c941550b8ffd292524dc42cd2dc8504824d56a..8f9c7d55dddaa17652f5b60488e004c42f5de6a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -36,6 +36,9 @@
 
 #include "bif/bif_4_1_d.h"
 
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
 static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
@@ -116,8 +119,7 @@ static int uvd_v4_2_sw_init(void *handle)
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
-			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 
 	return r;
 }
@@ -526,20 +528,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
-static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		4; /* uvd_v4_2_ring_emit_ib */
-}
-
-static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v4_2_ring_emit_hdp_flush */
-		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
-		14; /* uvd_v4_2_ring_emit_fence  x1 no user fence */
-}
-
 /**
  * uvd_v4_2_mc_resume - memory controller programming
  *
@@ -698,18 +686,34 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void uvd_v4_2_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+	if (enable)
+		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+	else
+		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+
+	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
 static int uvd_v4_2_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	bool gate = false;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-		return 0;
-
 	if (state == AMD_CG_STATE_GATE)
 		gate = true;
 
+	uvd_v4_2_set_bypass_mode(adev, gate);
+
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
+		return 0;
+
 	uvd_v4_2_enable_mgcg(adev, gate);
 
 	return 0;
@@ -738,7 +742,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
 	}
 }
 
-const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
+static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
 	.name = "uvd_v4_2",
 	.early_init = uvd_v4_2_early_init,
 	.late_init = NULL,
@@ -756,10 +760,18 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_UVD,
+	.align_mask = 0xf,
+	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.get_rptr = uvd_v4_2_ring_get_rptr,
 	.get_wptr = uvd_v4_2_ring_get_wptr,
 	.set_wptr = uvd_v4_2_ring_set_wptr,
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
+	.emit_frame_size =
+		2 + /* uvd_v4_2_ring_emit_hdp_flush */
+		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
 	.emit_ib = uvd_v4_2_ring_emit_ib,
 	.emit_fence = uvd_v4_2_ring_emit_fence,
 	.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
@@ -770,8 +782,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -789,3 +799,12 @@ static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
 	adev->uvd.irq.num_types = 1;
 	adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_UVD,
+	.major = 4,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &uvd_v4_2_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
index 0a615dd50840438b0585391076066ac0769b4906..8a0444bb8b952f7e3945280eb029fe2ddf596346 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h
@@ -24,6 +24,6 @@
 #ifndef __UVD_V4_2_H__
 #define __UVD_V4_2_H__
 
-extern const struct amd_ip_funcs uvd_v4_2_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v4_2_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 400c16fe579e0ac970110b9106172aba518a56a3..95303e2d5f92707f31ec2a86bef4b6e7497e9c8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -33,6 +33,8 @@
 #include "oss/oss_2_0_sh_mask.h"
 #include "bif/bif_5_0_d.h"
 #include "vi.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -112,8 +114,7 @@ static int uvd_v5_0_sw_init(void *handle)
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
-			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 
 	return r;
 }
@@ -577,20 +578,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
-static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		6; /* uvd_v5_0_ring_emit_ib */
-}
-
-static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v5_0_ring_emit_hdp_flush */
-		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
-		14; /* uvd_v5_0_ring_emit_fence  x1 no user fence */
-}
-
 static bool uvd_v5_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -737,6 +724,20 @@ static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
 }
 #endif
 
+static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+	if (enable)
+		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+	else
+		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+
+	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
 static int uvd_v5_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
@@ -744,6 +745,8 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 	static int curstate = -1;
 
+	uvd_v5_0_set_bypass_mode(adev, enable);
+
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
 		return 0;
 
@@ -789,7 +792,7 @@ static int uvd_v5_0_set_powergating_state(void *handle,
 	}
 }
 
-const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
 	.name = "uvd_v5_0",
 	.early_init = uvd_v5_0_early_init,
 	.late_init = NULL,
@@ -807,10 +810,18 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_UVD,
+	.align_mask = 0xf,
+	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.get_rptr = uvd_v5_0_ring_get_rptr,
 	.get_wptr = uvd_v5_0_ring_get_wptr,
 	.set_wptr = uvd_v5_0_ring_set_wptr,
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
+	.emit_frame_size =
+		2 + /* uvd_v5_0_ring_emit_hdp_flush */
+		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+		14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
 	.emit_ib = uvd_v5_0_ring_emit_ib,
 	.emit_fence = uvd_v5_0_ring_emit_fence,
 	.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
@@ -821,8 +832,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -840,3 +849,12 @@ static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->uvd.irq.num_types = 1;
 	adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_UVD,
+	.major = 5,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &uvd_v5_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
index e3b3c49fa5de52e111f8326f9a363abb1c48a448..2eaaea793ac5f347a46223f096f5d277682df4a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h
@@ -24,6 +24,6 @@
 #ifndef __UVD_V5_0_H__
 #define __UVD_V5_0_H__
 
-extern const struct amd_ip_funcs uvd_v5_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v5_0_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index ab3df6d756562ee33b97d2c48aaf6f7bfadc6f2a..a339b5ccb2965dcac9f0885888079a477cee747f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -116,8 +116,7 @@ static int uvd_v6_0_sw_init(void *handle)
 
 	ring = &adev->uvd.ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
-			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 
 	return r;
 }
@@ -725,31 +724,6 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0xE);
 }
 
-static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		8; /* uvd_v6_0_ring_emit_ib */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v6_0_ring_emit_hdp_flush */
-		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
-		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
-		14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v6_0_ring_emit_hdp_flush */
-		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
-		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
-		20 + /* uvd_v6_0_ring_emit_vm_flush */
-		14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
-}
-
 static bool uvd_v6_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -961,7 +935,7 @@ static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
 }
 #endif
 
-static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+static void uvd_v6_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
 {
 	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
 
@@ -979,15 +953,14 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE);
 
-	if (adev->asic_type == CHIP_FIJI ||
-	    adev->asic_type == CHIP_POLARIS10)
-		uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false);
+	uvd_v6_0_set_bypass_mode(adev, enable);
 
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
 		return 0;
 
-	if (state == AMD_CG_STATE_GATE) {
+	if (enable) {
 		/* disable HW gating and enable Sw gating */
 		uvd_v6_0_set_sw_clock_gating(adev);
 	} else {
@@ -1027,7 +1000,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
 	}
 }
 
-const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
+static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
 	.name = "uvd_v6_0",
 	.early_init = uvd_v6_0_early_init,
 	.late_init = NULL,
@@ -1048,10 +1021,19 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
+	.type = AMDGPU_RING_TYPE_UVD,
+	.align_mask = 0xf,
+	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.get_rptr = uvd_v6_0_ring_get_rptr,
 	.get_wptr = uvd_v6_0_ring_get_wptr,
 	.set_wptr = uvd_v6_0_ring_set_wptr,
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
+	.emit_frame_size =
+		2 + /* uvd_v6_0_ring_emit_hdp_flush */
+		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
 	.emit_ib = uvd_v6_0_ring_emit_ib,
 	.emit_fence = uvd_v6_0_ring_emit_fence,
 	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
@@ -1062,15 +1044,22 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
 };
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+	.type = AMDGPU_RING_TYPE_UVD,
+	.align_mask = 0xf,
+	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.get_rptr = uvd_v6_0_ring_get_rptr,
 	.get_wptr = uvd_v6_0_ring_get_wptr,
 	.set_wptr = uvd_v6_0_ring_set_wptr,
-	.parse_cs = NULL,
+	.emit_frame_size =
+		2 + /* uvd_v6_0_ring_emit_hdp_flush */
+		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+		20 + /* uvd_v6_0_ring_emit_vm_flush */
+		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
+	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
 	.emit_ib = uvd_v6_0_ring_emit_ib,
 	.emit_fence = uvd_v6_0_ring_emit_fence,
 	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
@@ -1083,8 +1072,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1108,3 +1095,30 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->uvd.irq.num_types = 1;
 	adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
 }
+
+const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_UVD,
+	.major = 6,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_UVD,
+	.major = 6,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &uvd_v6_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_UVD,
+	.major = 6,
+	.minor = 3,
+	.rev = 0,
+	.funcs = &uvd_v6_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
index 6b92a235298650776c06396e3ad2dbfeae0e1080..d3d48c6428cd9d6a691ed564b76c39864dd228c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h
@@ -24,6 +24,8 @@
 #ifndef __UVD_V6_0_H__
 #define __UVD_V6_0_H__
 
-extern const struct amd_ip_funcs uvd_v6_0_ip_funcs;
+extern const struct amdgpu_ip_block_version uvd_v6_0_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_2_ip_block;
+extern const struct amdgpu_ip_block_version uvd_v6_3_ip_block;
 
 #endif
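
Aside (a reading aid, not part of the patch): a quick check of the dword budgets folded into the two uvd_v6_0 tables above. emit_frame_size is the worst case a single submission may need outside its IBs, so ring space can be reserved up front without calling the removed get_dma_frame_size() callbacks. For uvd_v6_0_ring_vm_funcs that budget is 2 + 2 + 10 + 20 + 14 + 14 = 62 dwords plus emit_ib_size = 8 per IB; the physical-mode table, which emits neither a VM flush nor the second fence, needs only 2 + 2 + 10 + 14 = 28.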
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 76e64ad04a53e2f71453c301e4ec0bef022dd9ce..38ed903dd6f821bbdb20b098eee61ce820fe745f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -224,8 +224,8 @@ static int vce_v2_0_sw_init(void *handle)
 	for (i = 0; i < adev->vce.num_rings; i++) {
 		ring = &adev->vce.ring[i];
 		sprintf(ring->name, "vce%d", i);
-		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-				     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+		r = amdgpu_ring_init(adev, ring, 512,
+				     &adev->vce.irq, 0);
 		if (r)
 			return r;
 	}
@@ -592,7 +592,7 @@ static int vce_v2_0_set_powergating_state(void *handle,
 		return vce_v2_0_start(adev);
 }
 
-const struct amd_ip_funcs vce_v2_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
 	.name = "vce_v2_0",
 	.early_init = vce_v2_0_early_init,
 	.late_init = NULL,
@@ -610,10 +610,15 @@ const struct amd_ip_funcs vce_v2_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_VCE,
+	.align_mask = 0xf,
+	.nop = VCE_CMD_NO_OP,
 	.get_rptr = vce_v2_0_ring_get_rptr,
 	.get_wptr = vce_v2_0_ring_get_wptr,
 	.set_wptr = vce_v2_0_ring_set_wptr,
 	.parse_cs = amdgpu_vce_ring_parse_cs,
+	.emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
 	.emit_ib = amdgpu_vce_ring_emit_ib,
 	.emit_fence = amdgpu_vce_ring_emit_fence,
 	.test_ring = amdgpu_vce_ring_test_ring,
@@ -622,8 +627,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
-	.get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
-	.get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -644,3 +647,12 @@ static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->vce.irq.num_types = 1;
 	adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
 };
+
+const struct amdgpu_ip_block_version vce_v2_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_VCE,
+	.major = 2,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &vce_v2_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
index 0d2ae8a01acd0716b93795227bfc0eeab26f8f77..4d15167654a6e36ddf48f46f153e9e84044f9b10 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h
@@ -24,6 +24,6 @@
 #ifndef __VCE_V2_0_H__
 #define __VCE_V2_0_H__
 
-extern const struct amd_ip_funcs vce_v2_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v2_0_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 8533269ec1606f1ed26714d9655a5eae20b49421..5ed2930a8568b8f3e1698312850ec9a9f306faa0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -389,8 +389,7 @@ static int vce_v3_0_sw_init(void *handle)
 	for (i = 0; i < adev->vce.num_rings; i++) {
 		ring = &adev->vce.ring[i];
 		sprintf(ring->name, "vce%d", i);
-		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-				     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
 		if (r)
 			return r;
 	}
@@ -808,28 +807,7 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, seq);
 }
 
-static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		5; /* vce_v3_0_ring_emit_ib */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		4 + /* vce_v3_0_emit_pipeline_sync */
-		6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* vce_v3_0_emit_vm_flush */
-		4 + /* vce_v3_0_emit_pipeline_sync */
-		6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
-}
-
-const struct amd_ip_funcs vce_v3_0_ip_funcs = {
+static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
 	.name = "vce_v3_0",
 	.early_init = vce_v3_0_early_init,
 	.late_init = NULL,
@@ -850,10 +828,17 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
+	.type = AMDGPU_RING_TYPE_VCE,
+	.align_mask = 0xf,
+	.nop = VCE_CMD_NO_OP,
 	.get_rptr = vce_v3_0_ring_get_rptr,
 	.get_wptr = vce_v3_0_ring_get_wptr,
 	.set_wptr = vce_v3_0_ring_set_wptr,
 	.parse_cs = amdgpu_vce_ring_parse_cs,
+	.emit_frame_size =
+		4 + /* vce_v3_0_emit_pipeline_sync */
+		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
 	.emit_ib = amdgpu_vce_ring_emit_ib,
 	.emit_fence = amdgpu_vce_ring_emit_fence,
 	.test_ring = amdgpu_vce_ring_test_ring,
@@ -862,15 +847,21 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
-	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
 };
 
 static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+	.type = AMDGPU_RING_TYPE_VCE,
+	.align_mask = 0xf,
+	.nop = VCE_CMD_NO_OP,
 	.get_rptr = vce_v3_0_ring_get_rptr,
 	.get_wptr = vce_v3_0_ring_get_wptr,
 	.set_wptr = vce_v3_0_ring_set_wptr,
-	.parse_cs = NULL,
+	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
+	.emit_frame_size =
+		6 + /* vce_v3_0_emit_vm_flush */
+		4 + /* vce_v3_0_emit_pipeline_sync */
+		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
+	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
 	.emit_ib = vce_v3_0_ring_emit_ib,
 	.emit_vm_flush = vce_v3_0_emit_vm_flush,
 	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -881,8 +872,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
-	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -910,3 +899,30 @@ static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->vce.irq.num_types = 1;
 	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
 };
+
+const struct amdgpu_ip_block_version vce_v3_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_VCE,
+	.major = 3,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_VCE,
+	.major = 3,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &vce_v3_0_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version vce_v3_4_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_VCE,
+	.major = 3,
+	.minor = 4,
+	.rev = 0,
+	.funcs = &vce_v3_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
index b45af65da81fdecc7b7edfea64cc4d30c9d8b816..08b908c7de0fca1b40f5af527ba39174a9a91c83 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h
@@ -24,6 +24,8 @@
 #ifndef __VCE_V3_0_H__
 #define __VCE_V3_0_H__
 
-extern const struct amd_ip_funcs vce_v3_0_ip_funcs;
+extern const struct amdgpu_ip_block_version vce_v3_0_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_1_ip_block;
+extern const struct amdgpu_ip_block_version vce_v3_4_ip_block;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index c0d9aad7126f4a16e067e19e76067d0a3248e8d9..25c0a71b257dad65398f33800f75ba5ee111dd1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -121,8 +121,8 @@ static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
 	u32 r;
 
 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	WREG32(mmSMC_IND_INDEX_0, (reg));
-	r = RREG32(mmSMC_IND_DATA_0);
+	WREG32(mmSMC_IND_INDEX_11, (reg));
+	r = RREG32(mmSMC_IND_DATA_11);
 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
 	return r;
 }
@@ -132,8 +132,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	unsigned long flags;
 
 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
-	WREG32(mmSMC_IND_INDEX_0, (reg));
-	WREG32(mmSMC_IND_DATA_0, (v));
+	WREG32(mmSMC_IND_INDEX_11, (reg));
+	WREG32(mmSMC_IND_DATA_11, (v));
 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
 }
 
@@ -437,12 +437,12 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
 	/* take the smc lock since we are using the smc index */
 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
 	/* set rom index to 0 */
-	WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
-	WREG32(mmSMC_IND_DATA_0, 0);
+	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
+	WREG32(mmSMC_IND_DATA_11, 0);
 	/* set index to data for continuous read */
-	WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
+	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
 	for (i = 0; i < length_dw; i++)
-		dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
+		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
 
 	return true;
@@ -556,21 +556,100 @@ static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] =
 	{mmPA_SC_RASTER_CONFIG_1, false, true},
 };
 
-static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
-					 u32 sh_num, u32 reg_offset)
-{
-	uint32_t val;
+static uint32_t vi_get_register_value(struct amdgpu_device *adev,
+				      bool indexed, u32 se_num,
+				      u32 sh_num, u32 reg_offset)
+{
+	if (indexed) {
+		uint32_t val;
+		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+		switch (reg_offset) {
+		case mmCC_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+		case mmGC_USER_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+		case mmPA_SC_RASTER_CONFIG:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+		case mmPA_SC_RASTER_CONFIG_1:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+		}
 
-	mutex_lock(&adev->grbm_idx_mutex);
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+		mutex_lock(&adev->grbm_idx_mutex);
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
-	val = RREG32(reg_offset);
+		val = RREG32(reg_offset);
 
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
-	return val;
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+		return val;
+	} else {
+		unsigned idx;
+
+		switch (reg_offset) {
+		case mmGB_ADDR_CONFIG:
+			return adev->gfx.config.gb_addr_config;
+		case mmMC_ARB_RAMCFG:
+			return adev->gfx.config.mc_arb_ramcfg;
+		case mmGB_TILE_MODE0:
+		case mmGB_TILE_MODE1:
+		case mmGB_TILE_MODE2:
+		case mmGB_TILE_MODE3:
+		case mmGB_TILE_MODE4:
+		case mmGB_TILE_MODE5:
+		case mmGB_TILE_MODE6:
+		case mmGB_TILE_MODE7:
+		case mmGB_TILE_MODE8:
+		case mmGB_TILE_MODE9:
+		case mmGB_TILE_MODE10:
+		case mmGB_TILE_MODE11:
+		case mmGB_TILE_MODE12:
+		case mmGB_TILE_MODE13:
+		case mmGB_TILE_MODE14:
+		case mmGB_TILE_MODE15:
+		case mmGB_TILE_MODE16:
+		case mmGB_TILE_MODE17:
+		case mmGB_TILE_MODE18:
+		case mmGB_TILE_MODE19:
+		case mmGB_TILE_MODE20:
+		case mmGB_TILE_MODE21:
+		case mmGB_TILE_MODE22:
+		case mmGB_TILE_MODE23:
+		case mmGB_TILE_MODE24:
+		case mmGB_TILE_MODE25:
+		case mmGB_TILE_MODE26:
+		case mmGB_TILE_MODE27:
+		case mmGB_TILE_MODE28:
+		case mmGB_TILE_MODE29:
+		case mmGB_TILE_MODE30:
+		case mmGB_TILE_MODE31:
+			idx = (reg_offset - mmGB_TILE_MODE0);
+			return adev->gfx.config.tile_mode_array[idx];
+		case mmGB_MACROTILE_MODE0:
+		case mmGB_MACROTILE_MODE1:
+		case mmGB_MACROTILE_MODE2:
+		case mmGB_MACROTILE_MODE3:
+		case mmGB_MACROTILE_MODE4:
+		case mmGB_MACROTILE_MODE5:
+		case mmGB_MACROTILE_MODE6:
+		case mmGB_MACROTILE_MODE7:
+		case mmGB_MACROTILE_MODE8:
+		case mmGB_MACROTILE_MODE9:
+		case mmGB_MACROTILE_MODE10:
+		case mmGB_MACROTILE_MODE11:
+		case mmGB_MACROTILE_MODE12:
+		case mmGB_MACROTILE_MODE13:
+		case mmGB_MACROTILE_MODE14:
+		case mmGB_MACROTILE_MODE15:
+			idx = (reg_offset - mmGB_MACROTILE_MODE0);
+			return adev->gfx.config.macrotile_mode_array[idx];
+		default:
+			return RREG32(reg_offset);
+		}
+	}
 }
 
 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -605,10 +684,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
 			if (reg_offset != asic_register_entry->reg_offset)
 				continue;
 			if (!asic_register_entry->untouched)
-				*value = asic_register_entry->grbm_indexed ?
-					vi_read_indexed_register(adev, se_num,
-								 sh_num, reg_offset) :
-					RREG32(reg_offset);
+				*value = vi_get_register_value(adev,
+							       asic_register_entry->grbm_indexed,
+							       se_num, sh_num, reg_offset);
 			return 0;
 		}
 	}
@@ -618,10 +696,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
 			continue;
 
 		if (!vi_allowed_read_registers[i].untouched)
-			*value = vi_allowed_read_registers[i].grbm_indexed ?
-				vi_read_indexed_register(adev, se_num,
-							 sh_num, reg_offset) :
-				RREG32(reg_offset);
+			*value = vi_get_register_value(adev,
+						       vi_allowed_read_registers[i].grbm_indexed,
+						       se_num, sh_num, reg_offset);
 		return 0;
 	}
 	return -EINVAL;
@@ -652,18 +729,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 	return -EINVAL;
 }
 
-static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
-{
-	u32 tmp = RREG32(mmBIOS_SCRATCH_3);
-
-	if (hung)
-		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-	else
-		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
-	WREG32(mmBIOS_SCRATCH_3, tmp);
-}
-
 /**
  * vi_asic_reset - soft reset GPU
  *
@@ -677,11 +742,11 @@ static int vi_asic_reset(struct amdgpu_device *adev)
 {
 	int r;
 
-	vi_set_bios_scratch_engine_hung(adev, true);
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 
 	r = vi_gpu_pci_config_reset(adev);
 
-	vi_set_bios_scratch_engine_hung(adev, false);
+	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
 
 	return r;
 }
@@ -781,734 +846,6 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
 	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
 }
 
-/* topaz has no DCE, UVD, VCE */
-static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &iceland_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &sdma_v2_4_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 7,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &gmc_v7_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 2,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &iceland_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 1,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 2,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &sdma_v2_4_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &tonga_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 10,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &dce_v10_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 5,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &uvd_v5_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &tonga_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 10,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 5,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &uvd_v5_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 5,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &tonga_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 10,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &dce_v10_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &uvd_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 5,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &tonga_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 10,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &uvd_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &tonga_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 11,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &dce_v11_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 6,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &uvd_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &tonga_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 7,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 11,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 1,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 6,
-		.minor = 3,
-		.rev = 0,
-		.funcs = &uvd_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 4,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cz_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 11,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &dce_v11_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &uvd_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-#if defined(CONFIG_DRM_AMD_ACP)
-	{
-		.type = AMD_IP_BLOCK_TYPE_ACP,
-		.major = 2,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &acp_ip_funcs,
-	},
-#endif
-};
-
-static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
-{
-	/* ORDER MATTERS! */
-	{
-		.type = AMD_IP_BLOCK_TYPE_COMMON,
-		.major = 2,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vi_common_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GMC,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gmc_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_IH,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &cz_ih_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SMC,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &amdgpu_pp_ip_funcs
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_DCE,
-		.major = 11,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &dce_virtual_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_GFX,
-		.major = 8,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &gfx_v8_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_SDMA,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &sdma_v3_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_UVD,
-		.major = 6,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &uvd_v6_0_ip_funcs,
-	},
-	{
-		.type = AMD_IP_BLOCK_TYPE_VCE,
-		.major = 3,
-		.minor = 0,
-		.rev = 0,
-		.funcs = &vce_v3_0_ip_funcs,
-	},
-#if defined(CONFIG_DRM_AMD_ACP)
-	{
-		.type = AMD_IP_BLOCK_TYPE_ACP,
-		.major = 2,
-		.minor = 2,
-		.rev = 0,
-		.funcs = &acp_ip_funcs,
-	},
-#endif
-};
-
-int vi_set_ip_blocks(struct amdgpu_device *adev)
-{
-	if (adev->enable_virtual_display) {
-		switch (adev->asic_type) {
-		case CHIP_TOPAZ:
-			adev->ip_blocks = topaz_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
-			break;
-		case CHIP_FIJI:
-			adev->ip_blocks = fiji_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
-			break;
-		case CHIP_TONGA:
-			adev->ip_blocks = tonga_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
-			break;
-		case CHIP_POLARIS11:
-		case CHIP_POLARIS10:
-			adev->ip_blocks = polaris11_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
-			break;
-
-		case CHIP_CARRIZO:
-		case CHIP_STONEY:
-			adev->ip_blocks = cz_ip_blocks_vd;
-			adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
-			break;
-		default:
-			/* FIXME: not supported yet */
-			return -EINVAL;
-		}
-	} else {
-		switch (adev->asic_type) {
-		case CHIP_TOPAZ:
-			adev->ip_blocks = topaz_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
-			break;
-		case CHIP_FIJI:
-			adev->ip_blocks = fiji_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
-			break;
-		case CHIP_TONGA:
-			adev->ip_blocks = tonga_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
-			break;
-		case CHIP_POLARIS11:
-		case CHIP_POLARIS10:
-			adev->ip_blocks = polaris11_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
-			break;
-		case CHIP_CARRIZO:
-		case CHIP_STONEY:
-			adev->ip_blocks = cz_ip_blocks;
-			adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
-			break;
-		default:
-			/* FIXME: not supported yet */
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
 #define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
 #define ATI_REV_ID_FUSE_MACRO__SHIFT        9
 #define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00
@@ -1593,7 +930,7 @@ static int vi_common_early_init(void *handle)
 		break;
 	case CHIP_TONGA:
 		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
-		adev->pg_flags = 0;
+		adev->pg_flags = AMD_PG_SUPPORT_UVD;
 		adev->external_rev_id = adev->rev_id + 0x14;
 		break;
 	case CHIP_POLARIS11:
@@ -1908,7 +1245,7 @@ static int vi_common_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs vi_common_ip_funcs = {
+static const struct amd_ip_funcs vi_common_ip_funcs = {
 	.name = "vi_common",
 	.early_init = vi_common_early_init,
 	.late_init = NULL,
@@ -1925,3 +1262,110 @@ const struct amd_ip_funcs vi_common_ip_funcs = {
 	.set_powergating_state = vi_common_set_powergating_state,
 };
 
+static const struct amdgpu_ip_block_version vi_common_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_COMMON,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &vi_common_ip_funcs,
+};
+
+int vi_set_ip_blocks(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_TOPAZ:
+		/* topaz has no DCE, UVD, VCE */
+		amdgpu_ip_block_add(adev, &vi_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
+		amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
+		break;
+	case CHIP_FIJI:
+		amdgpu_ip_block_add(adev, &vi_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
+		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+		break;
+	case CHIP_TONGA:
+		amdgpu_ip_block_add(adev, &vi_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+		break;
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+		amdgpu_ip_block_add(adev, &vi_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
+		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+		break;
+	case CHIP_CARRIZO:
+		amdgpu_ip_block_add(adev, &vi_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+		amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+		break;
+	case CHIP_STONEY:
+		amdgpu_ip_block_add(adev, &vi_common_ip_block);
+		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
+		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
+		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		if (adev->enable_virtual_display)
+			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+		else
+			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
+		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
+		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+		amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
+		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+#if defined(CONFIG_DRM_AMD_ACP)
+		amdgpu_ip_block_add(adev, &acp_ip_block);
+#endif
+		break;
+	default:
+		/* FIXME: not supported yet */
+		return -EINVAL;
+	}
+
+	return 0;
+}
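
For context: vi_set_ip_blocks() above now registers IP blocks one by one
instead of pointing adev->ip_blocks at one of the static per-ASIC arrays
this patch deletes. The amdgpu_ip_block_add() helper it calls comes from
elsewhere in the series; a minimal sketch of its assumed behavior, where
adev->ip_blocks is taken to be an array of wrapper entries carrying a
version pointer rather than the versions themselves:

	int amdgpu_ip_block_add(struct amdgpu_device *adev,
				const struct amdgpu_ip_block_version *ip_block_version)
	{
		if (!ip_block_version)
			return -EINVAL;

		/* Append only; the call order in vi_set_ip_blocks() encodes
		 * the required init order, so no sorting happens here.
		 */
		adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

		return 0;
	}
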
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 50209404246273d4716816131dc81c22f8dc26d1..575d7aed5d32f2476f9056923b68bd768e66885e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -24,8 +24,6 @@
 #ifndef __VI_H__
 #define __VI_H__
 
-extern const struct amd_ip_funcs vi_common_ip_funcs;
-
 void vi_srbm_select(struct amdgpu_device *adev,
 		    u32 me, u32 pipe, u32 queue, u32 vmid);
 int vi_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index bec8125bceb0d2555b49b8badaafbf4494ac47f7..d1986276dbbd71cc144a48ec52bb14b908095509 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -84,6 +84,28 @@ enum amd_powergating_state {
 	AMD_PG_STATE_UNGATE,
 };
 
+struct amd_vce_state {
+	/* vce clocks */
+	u32 evclk;
+	u32 ecclk;
+	/* gpu clocks */
+	u32 sclk;
+	u32 mclk;
+	u8 clk_idx;
+	u8 pstate;
+};
+
+#define AMD_MAX_VCE_LEVELS 6
+
+enum amd_vce_level {
+	AMD_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
+	AMD_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
+	AMD_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
+	AMD_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+	AMD_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
+	AMD_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
 /* CG flags */
 #define AMD_CG_SUPPORT_GFX_MGCG			(1 << 0)
 #define AMD_CG_SUPPORT_GFX_MGLS			(1 << 1)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
index 3014d4a58c43a2e3c6099890239e22ee390258f7..a9ef1562f43bde57536802d3df0371e444e30672 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h
@@ -176,6 +176,8 @@
 #define mmSMU1_SMU_SMC_IND_DATA                                                 0x83
 #define mmSMU2_SMU_SMC_IND_DATA                                                 0x85
 #define mmSMU3_SMU_SMC_IND_DATA                                                 0x87
+#define mmSMC_IND_INDEX_11                                                      0x1AC
+#define mmSMC_IND_DATA_11                                                       0x1AD
 #define ixRCU_UC_EVENTS                                                         0xc0000004
 #define ixRCU_MISC_CTRL                                                         0xc0000010
 #define ixCC_RCU_FUSES                                                          0xc00c0000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
index 93391747998591b9df6caff88cf43bf554ebff2d..22dd4c2b7290647250a7391c9a6dc42a321003c2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
@@ -87,6 +87,8 @@
 #define mmSMC_IND_DATA_6                                                        0x8d
 #define mmSMC_IND_INDEX_7                                                       0x8e
 #define mmSMC_IND_DATA_7                                                        0x8f
+#define mmSMC_IND_INDEX_11                                                      0x1AC
+#define mmSMC_IND_DATA_11                                                       0x1AD
 #define mmSMC_IND_ACCESS_CNTL                                                   0x92
 #define mmSMC_MESSAGE_0                                                         0x94
 #define mmSMC_RESP_0                                                            0x95
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index 44b1855cb8df574e3eea7df650767781a005240b..eca2b851f25f78c14bb04d850feac1d15b0ff7fa 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -90,6 +90,8 @@
 #define mmSMC_IND_DATA_6                                                        0x8d
 #define mmSMC_IND_INDEX_7                                                       0x8e
 #define mmSMC_IND_DATA_7                                                        0x8f
+#define mmSMC_IND_INDEX_11                                                      0x1AC
+#define mmSMC_IND_DATA_11                                                       0x1AD
 #define mmSMC_IND_ACCESS_CNTL                                                   0x92
 #define mmSMC_MESSAGE_0                                                         0x94
 #define mmSMC_RESP_0                                                            0x95
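
The three SMU headers above gain the same indirect-access register pair,
replacing the private copies deleted from smu7_smumgr.h later in this
patch. SMC indirect access through such an index/data pair conventionally
works as in the generic sketch below; cgs_write_register() and
cgs_read_register() are the existing CGS MMIO helpers, and the wrapper
function itself is hypothetical:

	static uint32_t smc_ind_read(struct cgs_device *dev, uint32_t smc_addr)
	{
		/* program the target SMC address, then read the data port */
		cgs_write_register(dev, mmSMC_IND_INDEX_11, smc_addr);
		return cgs_read_register(dev, mmSMC_IND_DATA_11);
	}
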
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index df7c18b6a02a199bc9eda3fdec4f2406f5aabdf8..e4a1697ec1d31f4d04e5995c2975c4f2ef9fb745 100755
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -106,6 +106,7 @@ enum cgs_ucode_id {
 	CGS_UCODE_ID_CP_MEC_JT2,
 	CGS_UCODE_ID_GMCON_RENG,
 	CGS_UCODE_ID_RLC_G,
+	CGS_UCODE_ID_STORAGE,
 	CGS_UCODE_ID_MAXIMUM,
 };
 
@@ -619,6 +620,8 @@ typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device,
 typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,
 				struct cgs_system_info *sys_info);
 
+typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
+
 struct cgs_ops {
 	/* memory management calls (similar to KFD interface) */
 	cgs_gpu_mem_info_t gpu_mem_info;
@@ -670,6 +673,7 @@ struct cgs_ops {
 	cgs_call_acpi_method call_acpi_method;
 	/* get system info */
 	cgs_query_system_info query_system_info;
+	cgs_is_virtualization_enabled_t is_virtualization_enabled;
 };
 
 struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -773,4 +777,6 @@ struct cgs_device
 	CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
 	resource_base)
 
+#define cgs_is_virtualization_enabled(cgs_device) \
+		CGS_CALL(is_virtualization_enabled, cgs_device)
 #endif /* _CGS_COMMON_H */
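
The new cgs_is_virtualization_enabled() wrapper just dispatches through
cgs_ops, so each CGS client has to register a backing callback. A
hypothetical amdgpu-side implementation might look like the sketch below;
CGS_FUNC_ADEV and the virtualization flag are assumptions for
illustration, not part of this patch:

	static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
	{
		CGS_FUNC_ADEV;	/* assumed helper: recover adev from cgs_device */

		/* assumed flag, set during early init when running as a VF */
		return adev->virtualization.is_virtual;
	}

The callback would then be listed in the driver's cgs_ops table next to
query_system_info.
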
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7174f7a68266242651e5c451f299c57996865e90..0b1f2205c2f19958c1d0e98a3b03ce043dba543f 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -436,7 +436,8 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type  state)
 	}
 }
 
-int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, void *output)
+static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
+		void *input, void *output)
 {
 	int ret = 0;
 	struct pp_instance *pp_handle;
@@ -475,7 +476,7 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
 	return ret;
 }
 
-enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
+static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 {
 	struct pp_hwmgr *hwmgr;
 	struct pp_power_state *state;
@@ -820,6 +821,21 @@ static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
 	return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
 }
 
+static struct amd_vce_state *
+pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
+{
+	struct pp_hwmgr *hwmgr;
+
+	if (handle) {
+		hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+		if (hwmgr && idx < hwmgr->num_vce_state_tables)
+			return &hwmgr->vce_states[idx];
+	}
+
+	return NULL;
+}
+
 const struct amd_powerplay_funcs pp_dpm_funcs = {
 	.get_temperature = pp_dpm_get_temperature,
 	.load_firmware = pp_dpm_load_fw,
@@ -846,6 +862,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
 	.get_mclk_od = pp_dpm_get_mclk_od,
 	.set_mclk_od = pp_dpm_set_mclk_od,
 	.read_sensor = pp_dpm_read_sensor,
+	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
 };
 
 static int amd_pp_instance_init(struct amd_pp_init *pp_init,
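
With get_vce_clock_state exported through pp_dpm_funcs, a client holding
an amd_powerplay handle can walk the VCE state table; the callback
returns NULL once idx passes num_vce_state_tables, which bounds the loop.
A minimal, hypothetical caller sketch, assuming the usual
pp_handle/pp_funcs pair in struct amd_powerplay:

	static void dump_vce_clock_states(struct amd_powerplay *pp)
	{
		unsigned int i;

		for (i = 0; i < AMD_MAX_VCE_LEVELS; i++) {
			struct amd_vce_state *state =
				pp->pp_funcs->get_vce_clock_state(pp->pp_handle, i);

			if (!state)	/* past num_vce_state_tables */
				break;

			pr_info("vce state %u: evclk=%u ecclk=%u sclk=%u mclk=%u\n",
				i, state->evclk, state->ecclk,
				state->sclk, state->mclk);
		}
	}
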
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 960424913496d671d70fa220600fab6874dbd67d..4b14f259a147039e8e0eacc92da77f6266a2571e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -66,7 +66,7 @@ static const struct cz_power_state *cast_const_PhwCzPowerState(
 	return (struct cz_power_state *)hw_ps;
 }
 
-uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
+static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
 					uint32_t clock, uint32_t msg)
 {
 	int i = 0;
@@ -1017,7 +1017,7 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
 	return 0;
 }
 
-int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
+static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
 				void *output, void *storage, int result)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1225,7 +1225,7 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 
@@ -1239,7 +1239,7 @@ int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 	struct phm_clock_voltage_dependency_table *table =
@@ -1277,7 +1277,7 @@ int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 
@@ -1533,7 +1533,7 @@ static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 	return result;
 }
 
-int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
+static int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
 {
 	return sizeof(struct cz_power_state);
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index 1944d289f84603f0c0eab21a478a0c5ace8835a7..f5e8fda964f7836a753bb0fa6f2558613b5d2a33 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -25,6 +25,7 @@
 #include "linux/delay.h"
 #include "hwmgr.h"
 #include "amd_acpi.h"
+#include "pp_acpi.h"
 
 bool acpi_atcs_functions_supported(void *device, uint32_t index)
 {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 7de701d8a450a624bcc57c9802338676ab573b0b..baf0f3d4c2f0f88dee9556c45b324b18787f3cbd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -131,7 +131,7 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
 /**
  * Private Function to get the PowerPlay Table Address.
  */
-const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
+static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
 {
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 
@@ -1049,7 +1049,7 @@ static int check_powerplay_tables(
 	return 0;
 }
 
-int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
 {
 	int result = 0;
 	const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
@@ -1100,7 +1100,7 @@ int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
 	return result;
 }
 
-int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
+static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
 {
 	struct phm_ppt_v1_information *pp_table_information =
 		(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1211,7 +1211,7 @@ static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
 }
 
 static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
-		struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
+		struct amd_vce_state *vce_state, void **clock_info, uint32_t *flag)
 {
 	const ATOM_Tonga_VCE_State_Record *vce_state_record;
 	ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
@@ -1315,7 +1315,7 @@ int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
 
 	hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);
 
-	if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
+	if ((i != 0) && (i <= AMD_MAX_VCE_LEVELS)) {
 		for (j = 0; j < i; j++)
 			ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
 	}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index ccf7ebeaf89267a830c1f2a0d3ecb1f22fe693f6..a4e9cf429e6233027535e553b17a738f0f90d3dd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -1507,7 +1507,7 @@ static int init_phase_shedding_table(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
-int get_number_of_vce_state_table_entries(
+static int get_number_of_vce_state_table_entries(
 						  struct pp_hwmgr *hwmgr)
 {
 	const ATOM_PPLIB_POWERPLAYTABLE *table =
@@ -1521,9 +1521,9 @@ int get_number_of_vce_state_table_entries(
 	return 0;
 }
 
-int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
+static int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
 							unsigned long i,
-							struct pp_vce_state *vce_state,
+							struct amd_vce_state *vce_state,
 							void **clock_info,
 							unsigned long *flag)
 {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 6eb6db199250f3638588179d20a6c489e3484379..cf2ee93d8475270421b451d4355a9f8a6ff74aa4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -75,7 +75,7 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cf_want_uvd_power_gating(hwmgr)) {
 		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -91,7 +91,7 @@ int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cf_want_vce_power_gating(hwmgr))
 		return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -99,7 +99,7 @@ int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cf_want_vce_power_gating(hwmgr))
 		return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -107,7 +107,7 @@ int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SamuPowerGating))
@@ -116,7 +116,7 @@ int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
+static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SamuPowerGating))
@@ -149,15 +149,21 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 	if (bgate) {
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_UVD,
-				AMD_CG_STATE_GATE);
+				AMD_CG_STATE_UNGATE);
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_PG_STATE_GATE);
 		smu7_update_uvd_dpm(hwmgr, true);
 		smu7_powerdown_uvd(hwmgr);
 	} else {
 		smu7_powerup_uvd(hwmgr);
-		smu7_update_uvd_dpm(hwmgr, false);
+		cgs_set_powergating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_UVD,
+						AMD_PG_STATE_UNGATE);
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_UVD,
-				AMD_CG_STATE_UNGATE);
+				AMD_CG_STATE_GATE);
+		smu7_update_uvd_dpm(hwmgr, false);
 	}
 
 	return 0;
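
The reorder above changes when UVD clock- and powergating are toggled
relative to the DPM update and the SMC power-down/up helpers. The
resulting control flow, summarized as a comment-only sketch:

	/*
	 * smu7_powergate_uvd() ordering after this change:
	 *
	 *   gate:    CG ungate -> PG gate -> smu7_update_uvd_dpm(hwmgr, true)
	 *            -> smu7_powerdown_uvd()
	 *   ungate:  smu7_powerup_uvd() -> PG ungate -> CG gate
	 *            -> smu7_update_uvd_dpm(hwmgr, false)
	 */
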
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 609996c84ad5ae8681bc8fa4db53f7e4bcb548e2..073e0bfa22a017ac377c9fd82776433ca6f147b6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -89,7 +89,7 @@ enum DPM_EVENT_SRC {
 
 static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
 
-struct smu7_power_state *cast_phw_smu7_power_state(
+static struct smu7_power_state *cast_phw_smu7_power_state(
 				  struct pp_hw_power_state *hw_ps)
 {
 	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -99,7 +99,7 @@ struct smu7_power_state *cast_phw_smu7_power_state(
 	return (struct smu7_power_state *)hw_ps;
 }
 
-const struct smu7_power_state *cast_const_phw_smu7_power_state(
+static const struct smu7_power_state *cast_const_phw_smu7_power_state(
 				 const struct pp_hw_power_state *hw_ps)
 {
 	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
@@ -115,7 +115,7 @@ const struct smu7_power_state *cast_const_phw_smu7_power_state(
  * @param    hwmgr  the address of the powerplay hardware manager.
  * @return   always 0
  */
-int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
+static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
 {
 	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
 
@@ -124,7 +124,7 @@ int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
 {
 	uint32_t speedCntl = 0;
 
@@ -135,7 +135,7 @@ uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
 			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
 }
 
-int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
 {
 	uint32_t link_width;
 
@@ -155,7 +155,7 @@ int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
 * @param    pHwMgr  the address of the powerplay hardware manager.
 * @return   always PP_Result_OK
 */
-int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
 {
 	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
 		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
@@ -802,7 +802,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
@@ -1153,7 +1153,7 @@ static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
 	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
 }
 
-int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
+static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	data->pcie_performance_request = true;
@@ -1161,7 +1161,7 @@ int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
 	int tmp_result = 0;
 	int result = 0;
@@ -1352,6 +1352,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct cgs_system_info sys_info = {0};
+	int result;
 
 	data->dll_default_on = false;
 	data->mclk_dpm0_activity_target = 0xa;
@@ -1439,6 +1441,18 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	data->pcie_lane_performance.min = 16;
 	data->pcie_lane_power_saving.max = 0;
 	data->pcie_lane_power_saving.min = 16;
+
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+	result = cgs_query_system_info(hwmgr->device, &sys_info);
+	if (!result) {
+		if (sys_info.value & AMD_PG_SUPPORT_UVD)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_UVDPowerGating);
+		if (sys_info.value & AMD_PG_SUPPORT_VCE)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_VCEPowerGating);
+	}
 }
 
 /**
@@ -1864,7 +1878,7 @@ static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
 {
 	struct phm_ppt_v1_information *table_info =
 		       (struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -2253,7 +2267,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data;
 	int result;
@@ -3672,14 +3686,16 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
 			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
 }
 
-int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+static int
+smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
 {
 	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
 
 	return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ?  0 : -1;
 }
 
-int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+static int
+smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
 {
 	uint32_t num_active_displays = 0;
 	struct cgs_display_info info = {0};
@@ -3701,7 +3717,7 @@ int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always OK
 */
-int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	uint32_t num_active_displays = 0;
@@ -3751,7 +3767,7 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 {
 	return smu7_program_display_gap(hwmgr);
 }
@@ -3775,13 +3791,14 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
 			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
 }
 
-int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+static int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
 					const void *thermal_interrupt_info)
 {
 	return 0;
 }
 
-bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+static bool
+smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	bool is_update_required = false;
@@ -3810,7 +3827,9 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
 		  (pl1->pcie_lane == pl2->pcie_lane));
 }
 
-int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
+		const struct pp_hw_power_state *pstate1,
+		const struct pp_hw_power_state *pstate2, bool *equal)
 {
 	const struct smu7_power_state *psa;
 	const struct smu7_power_state *psb;
@@ -3843,7 +3862,7 @@ int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_sta
 	return 0;
 }
 
-int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
@@ -3972,7 +3991,7 @@ static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
+static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
 {
 	int tmp_result, result = 0;
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 3fb5e57a378bc7df6fea185e975e1f7f5413bebe..eb3e83d7af31ddbab734a9f199776396f937def7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -359,6 +359,7 @@ struct amd_powerplay_funcs {
 	int (*get_mclk_od)(void *handle);
 	int (*set_mclk_od)(void *handle, uint32_t value);
 	int (*read_sensor)(void *handle, int idx, int32_t *value);
+	struct amd_vce_state *(*get_vce_clock_state)(void *handle, unsigned idx);
 };
 
 struct amd_powerplay {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 4f0fedd1e9d37085e43e2fea63674aecaf74534f..e38b999e32355dd690823e468bf662ca765d5176 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -367,7 +367,7 @@ struct pp_table_func {
 	int (*pptable_get_vce_state_table_entry)(
 						struct pp_hwmgr *hwmgr,
 						unsigned long i,
-						struct pp_vce_state *vce_state,
+						struct amd_vce_state *vce_state,
 						void **clock_info,
 						unsigned long *flag);
 };
@@ -586,18 +586,6 @@ struct phm_microcode_version_info {
 	uint32_t NB;
 };
 
-#define PP_MAX_VCE_LEVELS 6
-
-enum PP_VCE_LEVEL {
-	PP_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
-	PP_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
-	PP_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
-	PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
-	PP_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
-	PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-
 enum PP_TABLE_VERSION {
 	PP_TABLE_V0 = 0,
 	PP_TABLE_V1,
@@ -620,7 +608,7 @@ struct pp_hwmgr {
 	void *hardcode_pp_table;
 	bool need_pp_table_upload;
 
-	struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
+	struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
 	uint32_t num_vce_state_tables;
 
 	enum amd_dpm_forced_level dpm_level;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index 9ceaed9ac52aac4ed9c1874ef441965f26cd3ca4..827860fffe7847602954c9a8e7e7e503d7b1ac22 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -156,15 +156,6 @@ struct pp_power_state {
 	struct pp_hw_power_state  hardware;
 };
 
-
-/*Structure to hold a VCE state entry*/
-struct pp_vce_state {
-	uint32_t evclk;
-	uint32_t ecclk;
-	uint32_t sclk;
-	uint32_t mclk;
-};
-
 enum PP_MMProfilingState {
 	PP_MMProfilingState_NA = 0,
 	PP_MMProfilingState_Started,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
index 3df5de2cdab0254becae8ab384e4f8feb2cd2c8d..8fe8ba9434ffa344e36503591f98e4501b23aff8 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
@@ -21,9 +21,6 @@
  *
  */
 
-extern bool acpi_atcs_functions_supported(void *device,
-							uint32_t index);
-extern int acpi_pcie_perf_request(void *device,
-						uint8_t perf_req,
-						bool advertise);
-extern bool acpi_atcs_notify_pcie_device_ready(void *device);
+bool acpi_atcs_functions_supported(void *device, uint32_t index);
+int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise);
+bool acpi_atcs_notify_pcie_device_ready(void *device);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
index 76310ac7ef0d12ac6e25a84847655261a284bd48..34523fe6ed6fc25c17900f9e537df494e400340a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -2049,7 +2049,7 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
@@ -2125,7 +2125,7 @@ uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
 			return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
 		}
 	}
-	printk("cant't get the offset of type %x member %x \n", type, member);
+	printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
 	return 0;
 }
 
@@ -2150,7 +2150,7 @@ uint32_t fiji_get_mac_definition(uint32_t value)
 		return SMU73_MAX_LEVELS_MVDD;
 	}
 
-	printk("cant't get the mac of %x \n", value);
+	printk(KERN_WARNING "can't get the mac of %x\n", value);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 02fe1df855a92b588df4faa3ac8543f64f26d357..b86e48fb40d13f70d28bfedd3cb51fa7bf10005c 100755
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -159,7 +159,7 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
 	return result;
 }
 
-int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
+static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
 {
 	int i, result = -1;
 	uint32_t reg, data;
@@ -224,7 +224,7 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
 	return result;
 }
 
-int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
+static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
 {
 	int result = 0;
 	uint32_t table_start;
@@ -260,7 +260,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
 	return result;
 }
 
-int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 {
 	int32_t vr_config;
 	uint32_t table_start;
@@ -299,7 +299,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 }
 
 /* Work in Progress */
-int fiji_restore_vft_table(struct pp_smumgr *smumgr)
+static int fiji_restore_vft_table(struct pp_smumgr *smumgr)
 {
 	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
 
@@ -311,7 +311,7 @@ int fiji_restore_vft_table(struct pp_smumgr *smumgr)
 }
 
 /* Work in Progress */
-int fiji_save_vft_table(struct pp_smumgr *smumgr)
+static int fiji_save_vft_table(struct pp_smumgr *smumgr)
 {
 	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
 
@@ -322,7 +322,7 @@ int fiji_save_vft_table(struct pp_smumgr *smumgr)
 		return -EINVAL;
 }
 
-int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
 {
 	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index 8c889caba420dc2d55ec9340d46ab0a92d468514..b579f0c175e69af1848f1395dd5124241f1acc47 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -2140,7 +2140,7 @@ uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
 			return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
 		}
 	}
-	printk("cant't get the offset of type %x member %x \n", type, member);
+	printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
 	return 0;
 }
 
@@ -2163,7 +2163,7 @@ uint32_t iceland_get_mac_definition(uint32_t value)
 		return SMU71_MAX_LEVELS_MVDD;
 	}
 
-	printk("cant't get the mac of %x \n", value);
+	printk(KERN_WARNING "can't get the mac of %x\n", value);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index 4ccc0b72324de3e6c6e1e530a57f3bb070f92e6d..006b220716854e304908c2ebe0423e422b41d05b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -2174,7 +2174,7 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
 			return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
 		}
 	}
-	printk("cant't get the offset of type %x member %x \n", type, member);
+	printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
 	return 0;
 }
 
@@ -2201,7 +2201,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value)
 		return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
 	}
 
-	printk("cant't get the mac of %x \n", value);
+	printk(KERN_WARNING "can't get the mac of %x\n", value);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 5c3598ab7daeb1f2f73984ec72102f582c961ce1..f38a68747df00f68d9e695be6e351d247a211bd7 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -118,7 +118,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
 }
 
 
-int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 {
 	uint32_t vr_config;
 	uint32_t dpm_table_start;
@@ -172,7 +172,8 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 	return 0;
 }
 
-int polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
+static int
+polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
 {
 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 6af744f42ec99ff16d9bcb92346fb5db6f87f2ae..6df0d6edfdd11503d23a7580114f6d9834296393 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -278,6 +278,9 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
 	case UCODE_ID_RLC_G:
 		result = CGS_UCODE_ID_RLC_G;
 		break;
+	case UCODE_ID_MEC_STORAGE:
+		result = CGS_UCODE_ID_STORAGE;
+		break;
 	default:
 		break;
 	}
@@ -452,6 +455,10 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
 	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
 				UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
 				"Failed to Get Firmware Entry.", return -EINVAL);
+	if (cgs_is_virtualization_enabled(smumgr->device))
+		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+				UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
+				"Failed to Get Firmware Entry.", return -EINVAL);
 
 	smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
 	smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 76352f2423aebf3780e1efeba0c5e1b108ebe651..919be435b49c890368ca0f3dc6906fb3f90ddf2f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -28,8 +28,6 @@
 #include <pp_endian.h>
 
 #define SMC_RAM_END 0x40000
-#define mmSMC_IND_INDEX_11                              0x01AC
-#define mmSMC_IND_DATA_11                               0x01AD
 
 struct smu7_buffer_entry {
 	uint32_t data_size;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
index de2a24d85f48b94a0d17aafd41af3058839a4c6b..d08f6f19b4541c236195f4ef8b782477d818ea22 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
@@ -2651,7 +2651,7 @@ uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
 			return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
 		}
 	}
-	printk("cant't get the offset of type %x member %x\n", type, member);
+	printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
 	return 0;
 }
 
@@ -2675,7 +2675,7 @@ uint32_t tonga_get_mac_definition(uint32_t value)
 	case SMU_MAX_LEVELS_MVDD:
 		return SMU72_MAX_LEVELS_MVDD;
 	}
-	printk("cant't get the mac value %x\n", value);
+	printk(KERN_WARNING "can't get the mac value %x\n", value);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 608df4c90520278e59bfe75d3c3348d66af51e6c..7134fdf49210318c1ce6af25546d7781046baa87 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver ast_bo_driver = {
 	.ttm_tt_populate = ast_ttm_tt_populate,
 	.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
 	.init_mem_type = ast_bo_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = ast_bo_evict_flags,
 	.move = NULL,
 	.verify_access = ast_bo_verify_access,
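
Each TTM driver touched by this patch fills in the new .eviction_valuable
hook, with the common ttm_bo_eviction_valuable() (added in the ttm_bo.c
hunk at the end of this patch) as the stock implementation. A driver
wanting a smarter policy could wrap it; hypothetical sketch, where
mydrv_bo_is_scanout() is an assumed helper:

	static bool mydrv_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
	{
		/* e.g. never evict an active scan-out buffer from VRAM */
		if (bo->mem.mem_type == TTM_PL_VRAM && mydrv_bo_is_scanout(bo))
			return false;

		/* otherwise fall back to the common placement-range check */
		return ttm_bo_eviction_valuable(bo, place);
	}
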
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 269cfca9ca06834dad2fad7359c3df13f4d97c09..099a3c688c26d06d4babaa298e81480899ac9443 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -199,6 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = {
 	.ttm_tt_populate = ttm_pool_populate,
 	.ttm_tt_unpopulate = ttm_pool_unpopulate,
 	.init_mem_type = bochs_bo_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = bochs_bo_evict_flags,
 	.move = NULL,
 	.verify_access = bochs_bo_verify_access,
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bb2438dd8733f4c2c64618629abf1e946395f02a..de52b20800e1673577623b2f343aa3ec337b480f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
 	.ttm_tt_populate = cirrus_ttm_tt_populate,
 	.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
 	.init_mem_type = cirrus_bo_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = cirrus_bo_evict_flags,
 	.move = NULL,
 	.verify_access = cirrus_bo_verify_access,
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 919b35f2ad2487443c97dc3af28916d67ba6c0fb..83272b4563294e73a153176e984bee82613a7da4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -230,6 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
 	.ttm_tt_populate = mgag200_ttm_tt_populate,
 	.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
 	.init_mem_type = mgag200_bo_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = mgag200_bo_evict_flags,
 	.move = NULL,
 	.verify_access = mgag200_bo_verify_access,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 343b8659472cfb4b6b5e2a45c447346e381d41f2..e395cb6f511fc95717f7467bcc7aba38352b6d78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1561,6 +1561,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
 	.invalidate_caches = nouveau_bo_invalidate_caches,
 	.init_mem_type = nouveau_bo_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = nouveau_bo_evict_flags,
 	.move_notify = nouveau_bo_move_ntfy,
 	.move = nouveau_bo_move,
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index e26c82db948b9f8ef9da2c14cd671c3b4782e1a4..11761330a6b8a52ca5d5fc8b74ac7c7d23f6bf6e 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -387,6 +387,7 @@ static struct ttm_bo_driver qxl_bo_driver = {
 	.ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
 	.invalidate_caches = &qxl_invalidate_caches,
 	.init_mem_type = &qxl_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = &qxl_evict_flags,
 	.move = &qxl_bo_move,
 	.verify_access = &qxl_verify_access,
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 56bb758f4e332dbdf45d321306ae7875bb7a00d8..fa4f8f008e4dc3d93a799e6925c430e3d2e999ea 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -28,6 +28,7 @@
 #include <drm/radeon_drm.h>
 #include "radeon.h"
 #include "radeon_audio.h"
+#include "radeon_asic.h"
 #include "atom.h"
 #include <linux/backlight.h>
 
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index d960d3915408963569e6acbfe6612b70af631209..f8b05090232a9d3b23c85f379a98259cc6a42f36 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -27,6 +27,7 @@
  */
 #include <drm/drmP.h>
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "evergreend.h"
 #include "evergreen_reg_safe.h"
 #include "cayman_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b69c8de35bd315b56e13986bd2f4447ae422d50e..595a19736458ad79a6d30571dffd9e3a9b10a1f3 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,6 +28,7 @@
 #include <linux/kernel.h>
 #include <drm/drmP.h>
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "r600d.h"
 #include "r600_reg_safe.h"
 
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5df3ec73021b522fddae1b8368e68058ae7c93c5..4134759a682315c9518b67e29951c7f177dadb1c 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -29,6 +29,7 @@
 
 #include "atom.h"
 #include "atom-bits.h"
+#include "radeon_asic.h"
 
 extern void
 radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 38e396dae0a994f987f0c567fa855445fb9d7042..c1135feb93c19ad061a61e73caa78df1f88fc59c 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -29,6 +29,7 @@
 #include <drm/radeon_drm.h>
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "atom.h"
 
 /* 10 khz */
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index eb92aef46e3cfcf99a07d26272fac7725d0cfaa8..79c9b6f3f013b285cadd3ccc1570ddb126e98f67 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1651,7 +1651,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 
 	radeon_suspend(rdev);
 	radeon_hpd_fini(rdev);
-	/* evict remaining vram memory */
+	/* evict remaining vram memory
+	 * This second call to evict vram is to evict the gart page table
+	 * using the CPU.
+	 */
 	radeon_bo_evict_vram(rdev);
 
 	radeon_agp_suspend(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index de504ea29c06d355758e79c6ca18345f42e55f88..6d1237d6e1b85db740f3630cde4ad53ffd64a768 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -223,7 +223,8 @@ radeon_dp_mst_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-struct drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
+static struct drm_encoder *
+radeon_mst_best_encoder(struct drm_connector *connector)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
@@ -341,7 +342,8 @@ const struct drm_dp_mst_topology_cbs mst_cbs = {
 	.hotplug = radeon_dp_mst_hotplug,
 };
 
-struct radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
+static struct radeon_connector *
+radeon_mst_find_connector(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_connector *connector;
@@ -597,7 +599,7 @@ static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = {
 	.commit = radeon_mst_encoder_commit,
 };
 
-void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+static void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
 {
 	drm_encoder_cleanup(encoder);
 	kfree(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 868c3ba2efaad119eb26ca28ffcf97e333962623..222a1fa41d7c96c2e1853fcddfd3f6d64f56a196 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -27,6 +27,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "atom.h"
 #include <linux/backlight.h>
 #ifdef CONFIG_PMAC_BACKLIGHT
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4b6542538ff91581272deadb6e971c50f2429033..326ad068c15aa63ef38a6c85fbcc368ad52bd801 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -47,6 +47,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
 static void radeon_pm_update_profile(struct radeon_device *rdev);
 static void radeon_pm_set_clocks(struct radeon_device *rdev);
+static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
 
 int radeon_pm_get_type_index(struct radeon_device *rdev,
 			     enum radeon_pm_state_type ps_type,
@@ -79,6 +80,8 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
 				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
 		}
 		mutex_unlock(&rdev->pm.mutex);
+		/* allow a new DPM state to be picked */
+		radeon_pm_compute_clocks_dpm(rdev);
 	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 		if (rdev->pm.profile == PM_PROFILE_AUTO) {
 			mutex_lock(&rdev->pm.mutex);
@@ -882,7 +885,8 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
 		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
 	/* balanced states don't exist at the moment */
 	if (dpm_state == POWER_STATE_TYPE_BALANCED)
-		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+		dpm_state = rdev->pm.dpm.ac_power ?
+			POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
 
 restart_search:
 	/* Pick the best power state based on current conditions */
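
One detail worth noting: the new radeon_pm_compute_clocks_dpm() call in radeon_pm_acpi_event_handler() sits after mutex_unlock() on purpose, because the helper takes pm.mutex itself. A rough sketch of its shape (abridged; the real function in radeon_pm.c also refreshes the active CRTC state first):

	static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
	{
		mutex_lock(&rdev->pm.mutex);
		/* ... refresh active CRTC state ... */
		radeon_dpm_change_power_state_locked(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

Calling it with pm.mutex already held would self-deadlock.
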
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3de5e6e216628233ef1ba997bfcab6ae09d24621..0cf03ccbf0a72521f3d80d38d2e0ba7b4d4ba6de 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -863,6 +863,7 @@ static struct ttm_bo_driver radeon_bo_driver = {
 	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
 	.invalidate_caches = &radeon_invalidate_caches,
 	.init_mem_type = &radeon_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = &radeon_evict_flags,
 	.move = &radeon_bo_move,
 	.verify_access = &radeon_verify_access,
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e402be8821c45271a276f721f1eb9c94fcf4afb5..143280dc085176420f4edd473e6da574af180fe2 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -7858,7 +7858,7 @@ static void si_program_aspm(struct radeon_device *rdev)
 	}
 }
 
-int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
+static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
 {
 	unsigned i;
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index fc6217dfe4016ebe7ea270189b486cba4b7db76c..31fcf11a28310477ae30575c6ab7429c0b1d5a4a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -717,6 +717,20 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	return ret;
 }
 
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+			      const struct ttm_place *place)
+{
+	/* Don't evict this BO if it's outside of the
+	 * requested placement range
+	 */
+	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
+	    (place->lpfn && place->lpfn <= bo->mem.start))
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL(ttm_bo_eviction_valuable);
+
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 				uint32_t mem_type,
 				const struct ttm_place *place,
@@ -731,21 +745,16 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
 		ret = __ttm_bo_reserve(bo, false, true, NULL);
-		if (!ret) {
-			if (place && (place->fpfn || place->lpfn)) {
-				/* Don't evict this BO if it's outside of the
-				 * requested placement range
-				 */
-				if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
-				    (place->lpfn && place->lpfn <= bo->mem.start)) {
-					__ttm_bo_unreserve(bo);
-					ret = -EBUSY;
-					continue;
-				}
-			}
+		if (ret)
+			continue;
 
-			break;
+		if (place && !bdev->driver->eviction_valuable(bo, place)) {
+			__ttm_bo_unreserve(bo);
+			ret = -EBUSY;
+			continue;
 		}
+
+		break;
 	}
 
 	if (ret) {
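
The point of routing this decision through a callback is that drivers can layer their own policy on top of the common range check. A hypothetical override (the mydrv_* names are placeholders, not part of this patch) could look like:

	struct mydrv_bo {			/* hypothetical driver BO wrapper */
		struct ttm_buffer_object tbo;
		unsigned pin_count;
	};

	static bool mydrv_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
	{
		struct mydrv_bo *mbo = container_of(bo, struct mydrv_bo, tbo);

		/* driver policy: never consider pinned BOs for eviction */
		if (mbo->pin_count)
			return false;

		/* defer to the common placement-range check exported above */
		return ttm_bo_eviction_valuable(bo, place);
	}

Drivers without special requirements simply plug in ttm_bo_eviction_valuable directly, as the radeon hunk above and the virtio-gpu/vmwgfx hunks below do.
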
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 80482ac5f95dc71ac06bdd2d3dacc68333e32006..4a1de9f811934c57ff41371bebb9ebd4cbbf4a4b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -425,6 +425,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
 	.ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
 	.invalidate_caches = &virtio_gpu_invalidate_caches,
 	.init_mem_type = &virtio_gpu_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = &virtio_gpu_evict_flags,
 	.move = &virtio_gpu_bo_move,
 	.verify_access = &virtio_gpu_verify_access,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 78b75ee3c93141d0d7733636aac665ca9caa9f06..c894a48a74a62c532ea430cf4d54f0934d07d482 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -849,6 +849,7 @@ struct ttm_bo_driver vmw_bo_driver = {
 	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
 	.invalidate_caches = vmw_invalidate_caches,
 	.init_mem_type = vmw_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = vmw_evict_flags,
 	.move = NULL,
 	.verify_access = vmw_verify_access,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 9eb940d6755feba2163526cefc2ef230a6d4ea69..bb6a3357a817d01966fb7ea38297a56f4176edb4 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -47,6 +47,8 @@ struct drm_mm_node;
 
 struct ttm_placement;
 
+struct ttm_place;
+
 /**
  * struct ttm_bus_placement
  *
@@ -395,6 +397,17 @@ extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
 extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
 					    int resched);
 
+/**
+ * ttm_bo_eviction_valuable
+ *
+ * @bo: The buffer object to evict
+ * @place: the placement we need to make room for
+ *
+ * Check if it is valuable to evict the BO to make room for the given placement.
+ */
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+			      const struct ttm_place *place);
+
 /**
  * ttm_bo_synccpu_write_grab
  *
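
To make the range check concrete, here is a small stand-alone harness (plain userspace C; it mirrors the predicate instead of calling into TTM, with units simplified to pages and lpfn == 0 meaning no upper bound):

	#include <stdbool.h>
	#include <stdio.h>

	/* mirror of the check in ttm_bo_eviction_valuable() */
	static bool eviction_valuable(unsigned long fpfn, unsigned long lpfn,
				      unsigned long bo_start, unsigned long bo_size)
	{
		if (fpfn >= bo_start + bo_size || (lpfn && lpfn <= bo_start))
			return false;
		return true;
	}

	int main(void)
	{
		/* BO at [256, 512): a request for [0, 256) does not overlap */
		printf("%d\n", eviction_valuable(0, 256, 256, 256));	/* prints 0 */
		/* an unbounded request starting at page 300 overlaps the BO */
		printf("%d\n", eviction_valuable(300, 0, 256, 256));	/* prints 1 */
		return 0;
	}

Evicting a BO that cannot free space in the requested range would be pure overhead, which is exactly what the callback lets TTM skip.
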
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4f0a92185995db9b52d2062d2eecf1d1b80c3abb..d3d83dfe89e2c38806c0c7e63921eda8af700ad7 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -371,9 +371,21 @@ struct ttm_bo_driver {
 	 * submission as a consequence.
 	 */
 
-	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
-	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
-			      struct ttm_mem_type_manager *man);
+	int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
+	int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
+			     struct ttm_mem_type_manager *man);
+
+	/**
+	 * struct ttm_bo_driver member eviction_valuable
+	 *
+	 * @bo: the buffer object to be evicted
+	 * @place: placement we need room for
+	 *
+	 * Check with the driver if it is valuable to evict a BO to make room
+	 * for a certain placement.
+	 */
+	bool (*eviction_valuable)(struct ttm_buffer_object *bo,
+				  const struct ttm_place *place);
 	/**
 	 * struct ttm_bo_driver member evict_flags:
 	 *
@@ -384,8 +396,9 @@ struct ttm_bo_driver {
 	 * finished, they'll end up in bo->mem.flags
 	 */
 
-	 void(*evict_flags) (struct ttm_buffer_object *bo,
-				struct ttm_placement *placement);
+	void (*evict_flags)(struct ttm_buffer_object *bo,
+			    struct ttm_placement *placement);
+
 	/**
 	 * struct ttm_bo_driver member move:
 	 *
@@ -399,10 +412,9 @@ struct ttm_bo_driver {
 	 *
 	 * Move a buffer between two memory regions.
 	 */
-	int (*move) (struct ttm_buffer_object *bo,
-		     bool evict, bool interruptible,
-		     bool no_wait_gpu,
-		     struct ttm_mem_reg *new_mem);
+	int (*move)(struct ttm_buffer_object *bo, bool evict,
+		    bool interruptible, bool no_wait_gpu,
+		    struct ttm_mem_reg *new_mem);
 
 	/**
 	 * struct ttm_bo_driver_member verify_access
@@ -416,8 +428,8 @@ struct ttm_bo_driver {
 	 * access for all buffer objects.
 	 * This function should return 0 if access is granted, -EPERM otherwise.
 	 */
-	int (*verify_access) (struct ttm_buffer_object *bo,
-			      struct file *filp);
+	int (*verify_access)(struct ttm_buffer_object *bo,
+			     struct file *filp);
 
 	/* hook to notify driver about a driver move so it
 	 * can do tiling things */
@@ -430,7 +442,7 @@ struct ttm_bo_driver {
 	/**
 	 * notify the driver that we're about to swap out this bo
 	 */
-	void (*swap_notify) (struct ttm_buffer_object *bo);
+	void (*swap_notify)(struct ttm_buffer_object *bo);
 
 	/**
 	 * Driver callback on when mapping io memory (for bo_move_memcpy
@@ -438,8 +450,10 @@ struct ttm_bo_driver {
 	 * the mapping is not used anymore. io_mem_reserve & io_mem_free
 	 * are balanced.
 	 */
-	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
-	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+	int (*io_mem_reserve)(struct ttm_bo_device *bdev,
+			      struct ttm_mem_reg *mem);
+	void (*io_mem_free)(struct ttm_bo_device *bdev,
+			    struct ttm_mem_reg *mem);
 
 	/**
 	 * Optional driver callback for when BO is removed from the LRU.
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index d6b5a21f3d3c9f9c228ae0291b3ae9b19f486bcc..4684f378f046e2cda38d655d50e3899130987a03 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -81,6 +81,8 @@ extern "C" {
 #define AMDGPU_GEM_CREATE_VRAM_CLEARED		(1 << 3)
 /* Flag that create shadow bo(GTT) while allocating vram bo */
 #define AMDGPU_GEM_CREATE_SHADOW		(1 << 4)
+/* Flag that the BO should be allocated as linear (contiguous) VRAM */
+#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS	(1 << 5)
 
 struct drm_amdgpu_gem_create_in  {
 	/** the requested memory size */
@@ -436,6 +438,7 @@ struct drm_amdgpu_cs_chunk_data {
  *
  */
 #define AMDGPU_IDS_FLAGS_FUSION         0x1
+#define AMDGPU_IDS_FLAGS_PREEMPTION     0x2
 
 /* indicate if acceleration can be working */
 #define AMDGPU_INFO_ACCEL_WORKING		0x00
@@ -487,6 +490,10 @@ struct drm_amdgpu_cs_chunk_data {
 #define AMDGPU_INFO_VIS_VRAM_USAGE		0x17
 /* number of TTM buffer evictions */
 #define AMDGPU_INFO_NUM_EVICTIONS		0x18
+/* Query information about the VRAM and GTT memory domains */
+#define AMDGPU_INFO_MEMORY			0x19
+/* Query the VCE clock table */
+#define AMDGPU_INFO_VCE_CLOCK_TABLE		0x1A
 
 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT	0
 #define AMDGPU_INFO_MMR_SE_INDEX_MASK	0xff
@@ -572,6 +579,34 @@ struct drm_amdgpu_info_vram_gtt {
 	__u64 gtt_size;
 };
 
+struct drm_amdgpu_heap_info {
+	/** max. physical memory */
+	__u64 total_heap_size;
+
+	/** Theoretical max. available memory in the given heap */
+	__u64 usable_heap_size;
+
+	/**
+	 * Number of bytes allocated in the heap. This includes all processes
+	 * and private allocations in the kernel. It changes when new buffers
+	 * are allocated, freed, and moved. It cannot be larger than
+	 * total_heap_size.
+	 */
+	__u64 heap_usage;
+
+	/**
+	 * Theoretical maximum possible size of a buffer that could be
+	 * allocated in the given heap
+	 */
+	__u64 max_allocation;
+};
+
+struct drm_amdgpu_memory_info {
+	struct drm_amdgpu_heap_info vram;
+	struct drm_amdgpu_heap_info cpu_accessible_vram;
+	struct drm_amdgpu_heap_info gtt;
+};
+
 struct drm_amdgpu_info_firmware {
 	__u32 ver;
 	__u32 feature;
@@ -645,6 +680,24 @@ struct drm_amdgpu_info_hw_ip {
 	__u32  _pad;
 };
 
+#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES		6
+
+struct drm_amdgpu_info_vce_clock_table_entry {
+	/** System clock */
+	__u32 sclk;
+	/** Memory clock */
+	__u32 mclk;
+	/** VCE clock */
+	__u32 eclk;
+	__u32 pad;
+};
+
+struct drm_amdgpu_info_vce_clock_table {
+	struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
+	__u32 num_valid_entries;
+	__u32 pad;
+};
+
 /*
  * Supported GPU families
  */
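
As a usage illustration for the new AMDGPU_INFO_MEMORY query, a minimal userspace sketch (assumes libdrm headers are installed and that /dev/dri/renderD128 is the amdgpu render node; error handling trimmed):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <libdrm/amdgpu_drm.h>	/* header path may vary by distro */

	int main(void)
	{
		struct drm_amdgpu_memory_info mem;
		struct drm_amdgpu_info req;
		int fd = open("/dev/dri/renderD128", O_RDWR);

		memset(&req, 0, sizeof(req));
		memset(&mem, 0, sizeof(mem));
		req.return_pointer = (uintptr_t)&mem;
		req.return_size = sizeof(mem);
		req.query = AMDGPU_INFO_MEMORY;

		if (fd < 0 || ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req)) {
			perror("AMDGPU_INFO_MEMORY");
			return 1;
		}
		printf("VRAM: %llu of %llu bytes in use\n",
		       (unsigned long long)mem.vram.heap_usage,
		       (unsigned long long)mem.vram.total_heap_size);
		close(fd);
		return 0;
	}

AMDGPU_INFO_VCE_CLOCK_TABLE follows the same pattern, with struct drm_amdgpu_info_vce_clock_table as the return buffer and num_valid_entries bounding how many entries[] slots are meaningful.
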