[v4] test/gem_mocs_settings: Testing MOCS register settings

Submitted by Peter Antoine on April 11, 2016, 4:50 p.m.

Details

Message ID 1460393409-26115-1-git-send-email-peter.antoine@intel.com
State New
Headers show
Series "test/gem_mocs_settings: Testing MOCS register settings" ( rev: 2 ) in Intel GFX

Not browsing as part of any series.

Commit Message

Peter Antoine April 11, 2016, 4:50 p.m.
The MOCS registers were added in Gen9 and define the caching policy.
The registers are split into two sets. The first set controls the
EDRAM policy and have a set for each engine, the second set controls
the L3 policy. The two sets use the same index.

The RCS registers and the L3CC registers are stored in the RCS context.

The test checks that the registers are correct by first reading their
values directly via MMIO, and then again by reading them from within a
batch buffer. The RCS engine is tested last as it programs the
registers via a batch buffer and this will invalidate the test for
workloads that don't use the render ring or don't run a render batch
first.

v2: Reorganised the structure.
    Added more tests. (Chris Wilson)
v3: Fixed a few bugs. (Chris Wilson)
v4: More Tidy-ups.    (Chris Wilson)
    SKL does not have a snoop bit. (Peter Antoine)

Signed-off-by: Peter Antoine <peter.antoine@intel.com>
---
 lib/ioctl_wrappers.c      |  13 ++
 lib/ioctl_wrappers.h      |   1 +
 tests/Makefile.sources    |   1 +
 tests/gem_mocs_settings.c | 573 ++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 588 insertions(+)
 create mode 100644 tests/gem_mocs_settings.c

Patch hide | download patch | download mbox

diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 076bce8..12912fe 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -1382,6 +1382,19 @@  void gem_require_ring(int fd, unsigned ring)
 	igt_require(gem_has_ring(fd, ring));
 }
 
+/**
+ * gem_require_mocs:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query whether the device has MOCS registers.
+ * These exist gen 9+.
+ */
+void gem_require_mocs(int fd)
+{
+	igt_require(intel_gen(intel_get_drm_devid(fd)) >= 9);
+}
+
 /* prime */
 
 /**
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index d986f61..1383062 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -154,6 +154,7 @@  bool gem_has_softpin(int fd);
 void gem_require_caching(int fd);
 bool gem_has_ring(int fd, unsigned ring);
 void gem_require_ring(int fd, unsigned ring);
+void gem_require_mocs(int fd);
 
 /* prime */
 struct local_dma_buf_sync {
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 43f232f..d483c9e 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -148,6 +148,7 @@  TESTS_progs = \
 	gem_lut_handle \
 	gem_mmap_offset_exhaustion \
 	gem_media_fill \
+	gem_mocs_settings \
 	gem_gpgpu_fill \
 	gem_pin \
 	gem_reg_read \
diff --git a/tests/gem_mocs_settings.c b/tests/gem_mocs_settings.c
new file mode 100644
index 0000000..c11b9c5
--- /dev/null
+++ b/tests/gem_mocs_settings.c
@@ -0,0 +1,573 @@ 
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+/** @file gem_mocs_settings.c
+ *
+ * Check that the MOCS cache settings are valid.
+ */
+
+#include "igt.h"
+#include "igt_gt.h"
+
+#define MAX_NUMBER_MOCS_REGISTERS	(64)
+
+enum {
+	NONE,
+	RESET,
+	SUSPEND,
+	HIBERNATE
+};
+
+#define GEN9_LNCFCMOCS0		(0xB020)	/* L3 Cache Control base */
+#define GEN9_GFX_MOCS_0		(0xc800)	/* Graphics MOCS base register*/
+#define GEN9_MFX0_MOCS_0	(0xc900)	/* Media 0 MOCS base register*/
+#define GEN9_MFX1_MOCS_0	(0xcA00)	/* Media 1 MOCS base register*/
+#define GEN9_VEBOX_MOCS_0	(0xcB00)	/* Video MOCS base register*/
+#define GEN9_BLT_MOCS_0		(0xcc00)	/* Blitter MOCS base register*/
+
+#define ENGINE_GFX	(I915_EXEC_RENDER)
+#define ENGINE_MFX0	(I915_EXEC_BSD)
+#define ENGINE_MFX0_ALT	(I915_EXEC_BSD | 1 << 13)
+#define ENGINE_MFX1	(I915_EXEC_BSD | 2 << 13)
+#define ENGINE_BLT	(I915_EXEC_BLT)
+#define ENGINE_VEBOX	(I915_EXEC_VEBOX)
+
+struct mocs_entry {
+	uint32_t	control_value;
+	uint16_t	l3cc_value;
+};
+
+struct mocs_table {
+	uint32_t		size;
+	const struct mocs_entry	*table;
+};
+
+/* The first entries in the MOCS tables are defined by uABI */
+static const struct mocs_entry skylake_mocs_table[] = {
+	{ 0x00000009, 0x0010 },
+	{ 0x00000038, 0x0030 },
+	{ 0x0000003b, 0x0030 },
+};
+
+static const struct mocs_entry broxton_mocs_table[] = {
+	{ 0x00000009, 0x0010 },
+	{ 0x00000038, 0x0030 },
+	{ 0x0000003b, 0x0030 },
+};
+
+static const struct mocs_entry dirty_broxton_mocs_table[] = {
+	{ 0x00007FFF, 0x003F },
+	{ 0x00007FFF, 0x003F },
+	{ 0x00007FFF, 0x003F },
+};
+
+static const struct mocs_entry dirty_skylake_mocs_table[] = {
+	{ 0x00003FFF, 0x003F }, /* no snoop bit */
+	{ 0x00003FFF, 0x003F },
+	{ 0x00003FFF, 0x003F },
+};
+
+static const uint32_t write_values[] = {
+	0xFFFFFFFF,
+	0xFFFFFFFF,
+	0xFFFFFFFF,
+	0xFFFFFFFF
+};
+
+/* Look up the expected MOCS table for @devid.  With @dirty set, the
+ * table holds the values expected after the registers have been
+ * overwritten.  Returns false for devices without a known table.
+ */
+static bool get_mocs_settings(uint32_t devid,
+			struct mocs_table *table,
+			bool dirty)
+{
+	if (IS_SKYLAKE(devid) || IS_KABYLAKE(devid)) {
+		table->table = dirty ? dirty_skylake_mocs_table
+				     : skylake_mocs_table;
+		table->size  = dirty ? ARRAY_SIZE(dirty_skylake_mocs_table)
+				     : ARRAY_SIZE(skylake_mocs_table);
+		return true;
+	}
+
+	if (IS_BROXTON(devid)) {
+		table->table = dirty ? dirty_broxton_mocs_table
+				     : broxton_mocs_table;
+		table->size  = dirty ? ARRAY_SIZE(dirty_broxton_mocs_table)
+				     : ARRAY_SIZE(broxton_mocs_table);
+		return true;
+	}
+
+	return false;
+}
+
+/* Map an execbuf engine selector to the MMIO base of its MOCS bank.
+ * Fails the test on an unknown engine.
+ */
+static uint32_t get_engine_base(uint32_t engine)
+{
+	switch (engine) {
+	case ENGINE_MFX0:
+	case ENGINE_MFX0_ALT:
+		return GEN9_MFX0_MOCS_0;
+	case ENGINE_GFX:
+		return GEN9_GFX_MOCS_0;
+	case ENGINE_MFX1:
+		return GEN9_MFX1_MOCS_0;
+	case ENGINE_BLT:
+		return GEN9_BLT_MOCS_0;
+	case ENGINE_VEBOX:
+		return GEN9_VEBOX_MOCS_0;
+	default:
+		igt_fail(-1);
+	}
+
+	return 0; /* not reached: igt_fail() aborts the subtest */
+}
+
+/* Read a single MOCS control register via MMIO.  @fd is currently
+ * unused but kept for symmetry with the other accessors.
+ */
+static uint32_t get_mocs_register_value(int fd, uint64_t offset, uint32_t index)
+{
+	igt_assert(index < MAX_NUMBER_MOCS_REGISTERS);
+
+	return intel_register_read(offset + index * sizeof(uint32_t));
+}
+
+/* Compare every MOCS control register of @engine against the expected
+ * uABI values, reading them directly through MMIO.
+ */
+static void test_mocs_control_values(int fd, uint32_t engine)
+{
+	const uint32_t base = get_engine_base(engine);
+	struct mocs_table table;
+	int index;
+
+	igt_assert(get_mocs_settings(intel_get_drm_devid(fd), &table, false));
+
+	for (index = 0; index < table.size; index++)
+		igt_assert_eq(get_mocs_register_value(fd, base, index),
+			      table.table[index].control_value);
+}
+
+/* Check the L3CC registers via MMIO.  Each 32bit LNCFCMOCS register
+ * packs two 16bit table entries, so size/2 registers are read and both
+ * halves compared, plus a trailing low half for odd table sizes.
+ */
+static void test_mocs_l3cc_values(int fd)
+{
+	int i;
+	uint32_t reg_values[MAX_NUMBER_MOCS_REGISTERS/2];
+	struct mocs_table table;
+
+	/* Snapshot all L3CC registers before looking up the table. */
+	for (i = 0; i < MAX_NUMBER_MOCS_REGISTERS / 2; i++)
+		reg_values[i] = intel_register_read(GEN9_LNCFCMOCS0 + (i * 4));
+
+	igt_assert(get_mocs_settings(intel_get_drm_devid(fd), &table, false));
+
+	for (i = 0; i < table.size / 2; i++) {
+		/* low word holds the even entry, high word the odd entry */
+		igt_assert_eq((reg_values[i] & 0xffff),
+				table.table[i * 2].l3cc_value);
+		igt_assert_eq((reg_values[i] >> 16),
+				table.table[i * 2 + 1].l3cc_value);
+	}
+
+	/* odd-sized tables leave one entry in the low half of the last reg */
+	if (table.size & 1)
+		igt_assert_eq((reg_values[i] & 0xffff),
+				table.table[i * 2].l3cc_value);
+}
+
+/* Run the MMIO checks on every engine, then on the shared L3CC set. */
+static void test_mocs_values(int fd)
+{
+	uint32_t engine;
+
+	for_each_engine(fd, engine) {
+		if (engine == I915_EXEC_DEFAULT)
+			continue;
+
+		test_mocs_control_values(fd, engine);
+	}
+
+	test_mocs_l3cc_values(fd);
+}
+
+#define MI_STORE_REGISTER_MEM_64_BIT_ADDR	((0x24 << 23) | 2)
+
+/* Emit one MI_STORE_REGISTER_MEM per register into @batch, copying
+ * @size registers starting at @reg_base into @dst_handle (one dword
+ * each from offset 0).  Fills in @reloc for each store address.
+ * Returns the batch length in bytes.
+ */
+static int create_read_batch(struct drm_i915_gem_relocation_entry *reloc,
+			     uint32_t *batch,
+			     uint32_t dst_handle,
+			     uint32_t size,
+			     uint32_t reg_base)
+{
+	unsigned int index;
+	unsigned int offset = 0;
+
+	for (index = 0, offset = 0; index < size; index++, offset += 4)
+	{
+		batch[offset]   = MI_STORE_REGISTER_MEM_64_BIT_ADDR;
+		batch[offset+1] = reg_base + (index * sizeof(uint32_t));
+		batch[offset+2] = index * sizeof(uint32_t);	/* reloc */
+		batch[offset+3] = 0;
+
+		reloc[index].offset = (offset + 2) * sizeof(uint32_t);
+		reloc[index].delta = index * sizeof(uint32_t);
+		reloc[index].target_handle = dst_handle;
+		reloc[index].read_domains = I915_GEM_DOMAIN_RENDER;
+		/* The GPU writes the register values into the target, so
+		 * a write domain must be declared or the kernel will not
+		 * track the GPU write for the later set_domain/CPU read.
+		 */
+		reloc[index].write_domain = I915_GEM_DOMAIN_RENDER;
+	}
+
+	batch[offset++] = MI_BATCH_BUFFER_END;
+	batch[offset++] = 0;
+
+	return offset * sizeof(uint32_t);
+}
+
+/* Submit a batch on @engine_id in context @ctx_id that copies @size
+ * registers, starting at @reg_base, into @dst_handle.  Does not wait
+ * for completion; callers use gem_set_domain() before reading back.
+ */
+static void do_read_registers(int fd,
+			      uint32_t ctx_id,
+			      uint32_t dst_handle,
+			      uint32_t reg_base,
+			      uint32_t size,
+			      uint32_t engine_id)
+{
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 gem_exec[2];
+	struct drm_i915_gem_relocation_entry reloc[MAX_NUMBER_MOCS_REGISTERS];
+	uint32_t batch[MAX_NUMBER_MOCS_REGISTERS * 4 + 4];
+	uint32_t handle = gem_create(fd, 4096);
+
+	memset(reloc, 0, sizeof(reloc));
+	memset(gem_exec, 0, sizeof(gem_exec));
+	memset(&execbuf, 0, sizeof(execbuf));
+
+	/* object 0: the destination, object 1 (last): the batch itself */
+	gem_exec[0].handle = dst_handle;
+
+	gem_exec[1].handle = handle;
+	gem_exec[1].relocation_count = size;
+	gem_exec[1].relocs_ptr = (uintptr_t) reloc;
+
+	execbuf.buffers_ptr = (uintptr_t)gem_exec;
+	execbuf.buffer_count = 2;
+	execbuf.batch_len = create_read_batch(reloc,
+					      batch,
+					      dst_handle,
+					      size,
+					      reg_base);
+
+	gem_write(fd, handle, 0, batch, execbuf.batch_len);
+
+	i915_execbuffer2_set_context_id(execbuf, ctx_id);
+
+	/* SECURE: register access from a batch needs a privileged batch */
+	execbuf.flags = I915_EXEC_SECURE | engine_id;
+
+	gem_execbuf(fd, &execbuf);
+	gem_close(fd, handle);
+}
+
+#define LOCAL_MI_LOAD_REGISTER_IMM	(0x22 << 23)
+
+/* Build a batch that programs @size registers at @reg_base with
+ * @values using a single MI_LOAD_REGISTER_IMM.  Returns the batch
+ * length in bytes.
+ */
+static int create_write_batch(uint32_t *batch,
+			      const uint32_t *values,
+			      uint32_t size,
+			      uint32_t reg_base)
+{
+	unsigned int pos = 0;
+	unsigned int reg;
+
+	batch[pos++] = LOCAL_MI_LOAD_REGISTER_IMM | (size * 2 - 1);
+
+	for (reg = 0; reg < size; reg++) {
+		batch[pos++] = reg_base + reg * sizeof(uint32_t);
+		batch[pos++] = values[reg];
+	}
+
+	/* trailing noop, end of batch, noop pad */
+	batch[pos++] = 0;
+	batch[pos++] = MI_BATCH_BUFFER_END;
+	batch[pos++] = 0;
+
+	return pos * sizeof(uint32_t);
+}
+
+/* Submit a batch on @engine_id that programs @size registers at
+ * @reg_base with @values via MI_LOAD_REGISTER_IMM.  A @ctx_id of 0
+ * runs in the default context.
+ */
+static void write_registers(int fd,
+			    uint32_t ctx_id,
+			    uint32_t reg_base,
+			    const uint32_t *values,
+			    uint32_t size,
+			    uint32_t engine_id)
+{
+	struct drm_i915_gem_exec_object2 gem_exec;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	uint32_t batch[MAX_NUMBER_MOCS_REGISTERS * 4 + 4];
+	uint32_t handle = gem_create(fd, 4096);
+
+	memset(&gem_exec, 0, sizeof(gem_exec));
+	memset(&execbuf, 0, sizeof(execbuf));
+
+	gem_exec.handle = handle;
+
+	execbuf.buffers_ptr = (uintptr_t)&gem_exec;
+	execbuf.buffer_count = 1;
+	execbuf.batch_len = create_write_batch(batch, values, size, reg_base);
+
+	gem_write(fd, handle, 0, batch, execbuf.batch_len);
+
+	/* execbuf is zeroed, so the default context (0) is already set;
+	 * only override for a user-created context.
+	 */
+	if (ctx_id != 0)
+		i915_execbuffer2_set_context_id(execbuf, ctx_id);
+
+	/* SECURE: register writes need a privileged batch */
+	execbuf.flags = I915_EXEC_SECURE | engine_id;
+
+	gem_execbuf(fd, &execbuf);
+	gem_close(fd, handle);
+}
+
+/* Read back the MOCS control registers of @engine through a batch
+ * running in @ctx_id and compare them against the expected table
+ * (the overwritten values when @dirty).
+ */
+static void check_control_registers(int fd,
+				    const struct intel_execution_engine *engine,
+				    uint32_t ctx_id,
+				    bool dirty)
+{
+	uint32_t dst_handle = gem_create(fd, 4096);
+	uint32_t reg_base  = get_engine_base(engine->exec_id | engine->flags);
+	uint32_t *read_regs;
+	struct mocs_table table;
+
+	igt_assert(get_mocs_settings(intel_get_drm_devid(fd), &table, dirty));
+
+	do_read_registers(fd,
+			  ctx_id,
+			  dst_handle,
+			  reg_base,
+			  table.size,
+			  engine->exec_id | engine->flags);
+
+	/* moving to the CPU domain waits for the GPU writes to land */
+	gem_set_domain(fd, dst_handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+
+	read_regs = gem_mmap__cpu(fd, dst_handle, 0, 4096, PROT_READ);
+
+	for (int index = 0; index < table.size; index++)
+		igt_assert_eq(read_regs[index],
+				table.table[index].control_value);
+
+	munmap(read_regs, 4096);
+	gem_close(fd, dst_handle);
+}
+
+/* Read back the L3CC registers through a batch running in @ctx_id and
+ * compare with the expected table.  As in test_mocs_l3cc_values(),
+ * each 32bit register packs two 16bit table entries.
+ */
+static void check_l3cc_registers(int fd,
+				 const struct intel_execution_engine *engine,
+				 uint32_t ctx_id,
+				 bool dirty)
+{
+	struct mocs_table table;
+	uint32_t dst_handle = gem_create(fd, 4096);
+	uint32_t *read_regs;
+	int index;
+
+	igt_assert(get_mocs_settings(intel_get_drm_devid(fd), &table, dirty));
+
+	/* two entries per register, rounded up for odd table sizes */
+	do_read_registers(fd,
+			  ctx_id,
+			  dst_handle,
+			  GEN9_LNCFCMOCS0,
+			  (table.size + 1) / 2,
+			  engine->exec_id | engine->flags);
+
+	/* moving to the CPU domain waits for the GPU writes to land */
+	gem_set_domain(fd, dst_handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+
+	read_regs = gem_mmap__cpu(fd, dst_handle, 0, 4096, PROT_READ);
+
+	for (index = 0; index < table.size / 2; index++) {
+		/* low word: even entry, high word: odd entry */
+		igt_assert_eq(read_regs[index] & 0xffff,
+				table.table[index * 2].l3cc_value);
+		igt_assert_eq(read_regs[index] >> 16,
+				table.table[index * 2 + 1].l3cc_value);
+	}
+
+	/* odd-sized tables: final entry sits in the last low half */
+	if (table.size & 0x01)
+		igt_assert_eq(read_regs[index] & 0xffff,
+				table.table[index * 2].l3cc_value);
+
+	munmap(read_regs, 4096);
+	gem_close(fd, dst_handle);
+}
+
+/* Check both MOCS register banks of @engine, either in the default
+ * context (@use_default) or in a freshly created one.
+ */
+static void test_context_mocs_values(int fd,
+				const struct intel_execution_engine *engine,
+				bool use_default)
+{
+	uint32_t ctx = use_default ? 0 : gem_context_create(fd);
+
+	check_control_registers(fd, engine, ctx, false);
+	check_l3cc_registers(fd, engine, ctx, false);
+
+	if (!use_default)
+		gem_context_destroy(fd, ctx);
+}
+
+/* Validate the MOCS values via MMIO, apply the power event selected by
+ * @mode, then validate again through batches on every available engine
+ * (default context) and once more via MMIO.
+ */
+static void non_context_tests(unsigned mode)
+{
+	int fd = drm_open_driver_master(DRIVER_INTEL);
+	const struct intel_execution_engine *e;
+
+	igt_debug("Testing Non/Default Context Engines\n");
+
+	gem_require_mocs(fd);
+
+	test_mocs_values(fd);
+
+	switch (mode) {
+	case NONE:	break;
+	case RESET:	igt_force_gpu_reset();	break;
+	case SUSPEND:	igt_system_suspend_autoresume(); break;
+	case HIBERNATE:	igt_system_hibernate_autoresume(); break;
+	}
+
+	for (e = intel_execution_engines; e->name; e++) {
+		if (e->exec_id != I915_EXEC_DEFAULT &&
+		    gem_has_ring(fd, e->exec_id | e->flags)) {
+			igt_debug("    Engine: %s\n", e->name);
+			test_context_mocs_values(fd, e, true);
+		}
+	}
+
+	test_mocs_values(fd);
+	close(fd);
+}
+
+/* Check that the MOCS values stored in the RCS context image survive
+ * the power event selected by @mode: read them through a user context
+ * before and after the event.
+ */
+static void context_save_restore_test(unsigned mode)
+{
+	int fd = drm_open_driver_master(DRIVER_INTEL);
+	const struct intel_execution_engine *e;
+	uint32_t ctx_id = gem_context_create(fd);
+
+	gem_require_mocs(fd);
+
+	igt_debug("Testing Save Restore\n");
+
+	/* both register banks live in the RCS context */
+	for (e = intel_execution_engines; e->name; e++) {
+		if (e->exec_id == I915_EXEC_RENDER) {
+			check_control_registers(fd, e, ctx_id, false);
+			check_l3cc_registers(fd, e, ctx_id, false);
+		}
+	}
+
+	switch (mode) {
+	case NONE:	break;
+	case RESET:	igt_force_gpu_reset();	break;
+	case SUSPEND:	igt_system_suspend_autoresume(); break;
+	case HIBERNATE:	igt_system_hibernate_autoresume(); break;
+	}
+
+	for (e = intel_execution_engines; e->name; e++) {
+		if (e->exec_id == I915_EXEC_RENDER) {
+			check_control_registers(fd, e, ctx_id, false);
+			check_l3cc_registers(fd, e, ctx_id, false);
+		}
+	}
+
+	gem_context_destroy(fd, ctx_id);
+	close(fd);
+}
+
+/* Dirty the RCS MOCS and L3CC registers in a user context, verify the
+ * dirty values persist across the power event selected by @mode, then
+ * confirm a brand new fd/context still sees the expected defaults.
+ */
+static void context_dirty_test(unsigned mode)
+{
+	int fd = drm_open_driver_master(DRIVER_INTEL);
+	const struct intel_execution_engine *e;
+	uint32_t ctx_id = gem_context_create(fd);
+
+	gem_require_mocs(fd);
+
+	igt_debug("Testing Dirty Context\n");
+
+	/* baseline: the new context must start with the default values */
+	for (e = intel_execution_engines; e->name; e++)
+		if (e->exec_id == I915_EXEC_RENDER) {
+			check_control_registers(fd, e, ctx_id, false);
+			check_l3cc_registers(fd, e, ctx_id, false);
+		}
+
+	/* overwrite the RCS control registers ... */
+	write_registers(fd,
+			ctx_id,
+			GEN9_GFX_MOCS_0,
+			write_values,
+			ARRAY_SIZE(write_values),
+			I915_EXEC_RENDER);
+
+	/* ... and the shared L3CC registers */
+	write_registers(fd,
+			ctx_id,
+			GEN9_LNCFCMOCS0,
+			write_values,
+			ARRAY_SIZE(write_values),
+			I915_EXEC_RENDER);
+
+	/* the dirty values must now read back (masked per dirty table) */
+	for (e = intel_execution_engines; e->name; e++)
+		if (e->exec_id == I915_EXEC_RENDER) {
+			check_control_registers(fd, e, ctx_id, true);
+			check_l3cc_registers(fd, e, ctx_id, true);
+		}
+
+	switch(mode) {
+	case NONE:	break;
+	case RESET:	igt_force_gpu_reset();	break;
+	case SUSPEND:	igt_system_suspend_autoresume(); break;
+	case HIBERNATE:	igt_system_hibernate_autoresume(); break;
+	};
+
+	/* the dirty context values must survive the power event */
+	for (e = intel_execution_engines; e->name; e++)
+		if (e->exec_id == I915_EXEC_RENDER) {
+			check_control_registers(fd, e, ctx_id, true);
+			check_l3cc_registers(fd, e, ctx_id, true);
+		}
+
+	gem_context_destroy(fd, ctx_id);
+	close(fd);
+
+	/* a fresh fd must get a context with the default values again */
+	fd = drm_open_driver_master(DRIVER_INTEL);
+
+	for (e = intel_execution_engines; e->name; e++)
+		if (e->exec_id == I915_EXEC_RENDER)
+			test_context_mocs_values(fd, e, false);
+
+	close(fd);
+}
+
+/* Run all three test groups with the given power event @mode.  The RCS
+ * paths come last so render batches don't pre-dirty the other checks. */
+static void run_tests(unsigned mode)
+{
+	non_context_tests(mode);
+	context_save_restore_test(mode);
+	context_dirty_test(mode);
+}
+
+igt_main
+{
+	struct pci_device *pci_dev;
+
+	igt_fixture {
+		/* MMIO access is needed for the direct register reads */
+		pci_dev = intel_get_pci_device();
+		igt_require(pci_dev);
+		intel_register_access_init(pci_dev, 0);
+	}
+
+	igt_subtest("mocs-settings")
+		run_tests(NONE);
+
+	igt_subtest("mocs-reset")
+		run_tests(RESET);
+
+	igt_subtest("mocs-suspend")
+		run_tests(SUSPEND);
+
+	igt_subtest("mocs-hibernate")
+		run_tests(HIBERNATE);
+
+	igt_fixture {
+		intel_register_access_fini();
+	}
+}
+

Comments

On Mon, Apr 11, 2016 at 05:50:09PM +0100, Peter Antoine wrote:
> The MOCS registers were added in Gen9 and define the caching policy.
> The registers are split into two sets. The first set controls the
> EDRAM policy and have a set for each engine, the second set controls
> the L3 policy. The two sets use the same index.
> 
> The RCS registers and the L3CC registers are stored in the RCS context.
> 
> The test checks that the registers are correct by checking the values by
> directly reading them via MMIO, then again it tests them by reading them
> from within a batch buffer. RCS engine is tested last as it programs the
> registers via a batch buffer and this will invalidate the test for
> workloads that don't use the render ring or don't run a render batch
> first.
> 
> v2: Reorganised the structure.
>     Added more tests. (Chris Wilson)
> v3: Fixed a few bugs. (Chris Wilson)
> v4: More Tidy-ups.    (Chris Wilson)
>     SKL does does not have a snoop bit. (Peter Antoine)

Still got the domain control wrong, but I fixed that up and pushed.
I also tweaked a few of the tests along the way...
-Chris