[i-g-t,2/4] tests/gem_ctx_sseu: Dynamic (sub)slice programming tests

Submitted by Tvrtko Ursulin on Jan. 8, 2019, 3:13 p.m.

Details

Message ID 20190108151303.4302-3-tvrtko.ursulin@linux.intel.com
State New
Series "Per context dynamic (sub)slice power-gating"
Headers show

Commit Message

Tvrtko Ursulin Jan. 8, 2019, 3:13 p.m.
From: Lionel Landwerlin <lionel.g.landwerlin@intel.com>

Verify that the per-context dynamic SSEU uAPI works as expected.

v2: Add subslice tests (Lionel)
    Use MI_SET_PREDICATE for further verification when available (Lionel)

v3: Rename to gem_ctx_rpcs (Lionel)

v4: Update kernel API (Lionel)
    Add 0 value test (Lionel)
    Exercise invalid values (Lionel)

v5: Add perf tests (Lionel)

v6: Add new sysfs entry tests (Lionel)

v7: Test rsvd fields
    Update for kernel series changes

v8: Drop test_no_sseu_support() test (Kelvin)
    Drop drm_intel_*() apis (Chris)

v9: by Chris:
    Drop all do_ioctl/do_ioctl_err()
    Use gem_context_[gs]et_param()
    Use gem_read() instead of mapping memory
    by Lionel:
    Test dynamic sseu on/off more

Tvrtko Ursulin:

v10:
 * Various style tweaks and refactorings.
 * New test coverage.

v11:
 * Change platform support to just Gen11.
 * Simplify availability test. (Chris Wilson)
 * More invalid pointer tests. (Chris Wilson)

v12:
 * Fix MAP_FIXED use (doh!).
 * Fix get/set copy&paste errors.
 * Drop supported platform test. (Chris Wilson)
 * Add mmap__gtt test. (Chris Wilson)

v13:
 * Commit message tweaks.
 * Added reset/hang/suspend tests. (Chris Wilson)
 * Assert spinner is busy. (Chris Wilson)
 * Remove some more ABI assumptions. (Chris Wilson)

v14:
 * Use default resume time. (Chris Wilson)
 * Trigger hang after rpcs read batch has been submitted. (Chris Wilson)

v15:
 * Adjust for uAPI restrictions.

v16:
 * Build system changes.

v17:
 * Remove all subtests which read the RPCS register. (Joonas Lahtinen)

v18:
 * Tidy curly braces. (Joonas Lahtinen)

v19:
 * Check flags/rsvd MBZ.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> # v14
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 tests/Makefile.am          |   1 +
 tests/Makefile.sources     |   3 +
 tests/i915/gem_ctx_param.c |   4 +-
 tests/i915/gem_ctx_sseu.c  | 541 +++++++++++++++++++++++++++++++++++++
 tests/meson.build          |   8 +
 5 files changed, 556 insertions(+), 1 deletion(-)
 create mode 100644 tests/i915/gem_ctx_sseu.c

Patch hide | download patch | download mbox

diff --git a/tests/Makefile.am b/tests/Makefile.am
index 48d77535b6bd..42463bde7f30 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -111,6 +111,7 @@  gem_close_race_CFLAGS = $(AM_CFLAGS) $(THREAD_CFLAGS)
 gem_close_race_LDADD = $(LDADD) -lpthread
 gem_ctx_thrash_CFLAGS = $(AM_CFLAGS) $(THREAD_CFLAGS)
 gem_ctx_thrash_LDADD = $(LDADD) -lpthread
+gem_ctx_sseu_LDADD = $(LDADD) $(top_builddir)/lib/libigt_perf.la
 gem_exec_capture_LDADD = $(LDADD) -lz
 gem_exec_parallel_CFLAGS = $(AM_CFLAGS) $(THREAD_CFLAGS)
 gem_exec_parallel_LDADD = $(LDADD) -lpthread
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index eedde1e817cb..3dfeb5b67274 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -161,6 +161,9 @@  gem_ctx_isolation_SOURCES = i915/gem_ctx_isolation.c
 TESTS_progs += gem_ctx_param
 gem_ctx_param_SOURCES = i915/gem_ctx_param.c
 
+TESTS_progs += gem_ctx_sseu
+gem_ctx_sseu_SOURCES = i915/gem_ctx_sseu.c
+
 TESTS_progs += gem_ctx_switch
 gem_ctx_switch_SOURCES = i915/gem_ctx_switch.c
 
diff --git a/tests/i915/gem_ctx_param.c b/tests/i915/gem_ctx_param.c
index 0bbc5effbf9f..acc1e6297750 100644
--- a/tests/i915/gem_ctx_param.c
+++ b/tests/i915/gem_ctx_param.c
@@ -294,11 +294,13 @@  igt_main
 			set_priority(fd);
 	}
 
+	/* I915_CONTEXT_PARAM_SSEU tests are located in gem_ctx_sseu.c */
+
 	/* NOTE: This testcase intentionally tests for the next free parameter
 	 * to catch ABI extensions. Don't "fix" this testcase without adding all
 	 * the tests for the new param first.
 	 */
-	arg.param = I915_CONTEXT_PARAM_PRIORITY + 1;
+	arg.param = I915_CONTEXT_PARAM_SSEU + 1;
 
 	igt_subtest("invalid-param-get") {
 		arg.ctx_id = ctx;
diff --git a/tests/i915/gem_ctx_sseu.c b/tests/i915/gem_ctx_sseu.c
new file mode 100644
index 000000000000..52600a928693
--- /dev/null
+++ b/tests/i915/gem_ctx_sseu.c
@@ -0,0 +1,541 @@ 
+/*
+ * Copyright © 2017-2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+ *
+ */
+
+#include "igt.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <errno.h>
+#include <time.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+
+#include "igt_dummyload.h"
+#include "igt_perf.h"
+#include "igt_sysfs.h"
+#include "ioctl_wrappers.h"
+
+IGT_TEST_DESCRIPTION("Test context render powergating programming.");
+
+static unsigned int __intel_gen__, __intel_devid__;
+static uint64_t __slice_mask__, __subslice_mask__;
+static unsigned int __slice_count__, __subslice_count__;
+
+/*
+ * Return @mask with its lowest set bit cleared.
+ *
+ * Asserts if @mask is zero. Scans all 64 bits (the previous bound of
+ * "bits * 8 - 1" skipped bit 63, so a mask of only the top bit would
+ * have asserted incorrectly).
+ */
+static uint64_t mask_minus_one(uint64_t mask)
+{
+	unsigned int i;
+
+	for (i = 0; i < sizeof(mask) * 8; i++) {
+		if ((1ULL << i) & mask)
+			return mask & ~(1ULL << i);
+	}
+
+	igt_assert(0);
+	return 0;
+}
+
+/*
+ * Return @mask with its lowest clear bit set.
+ *
+ * Asserts if @mask is already all ones. Scans all 64 bits (the previous
+ * bound of "bits * 8 - 1" never considered bit 63).
+ */
+static uint64_t mask_plus_one(uint64_t mask)
+{
+	unsigned int i;
+
+	for (i = 0; i < sizeof(mask) * 8; i++) {
+		if (((1ULL << i) & mask) == 0)
+			return mask | (1ULL << i);
+	}
+
+	igt_assert(0);
+	return 0;
+}
+
+/*
+ * Clear the @n lowest set bits of @mask.
+ *
+ * Use a signed loop counter to match the signed @n parameter; with the
+ * previous "unsigned int i" a negative @n was converted to a huge
+ * unsigned bound instead of skipping the loop.
+ */
+static uint64_t mask_minus(uint64_t mask, int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		mask = mask_minus_one(mask);
+
+	return mask;
+}
+
+/*
+ * Set the @n lowest clear bits of @mask.
+ *
+ * Signed loop counter for the same reason as mask_minus(): a negative
+ * @n must mean "do nothing", not ~2^32 iterations.
+ */
+static uint64_t mask_plus(uint64_t mask, int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		mask = mask_plus_one(mask);
+
+	return mask;
+}
+
+/*
+ * Probe whether the running kernel supports I915_CONTEXT_PARAM_SSEU.
+ *
+ * Reads the default context's SSEU configuration and then tries to
+ * write it straight back. Returns true only if both get and set
+ * succeed; -ENODEV/-EINVAL from set means the uAPI exists but dynamic
+ * SSEU is not supported, any other errno is a test bug.
+ */
+static bool
+kernel_has_per_context_sseu_support(int fd)
+{
+	struct drm_i915_gem_context_param_sseu sseu = { };
+	struct drm_i915_gem_context_param arg = {
+		.param = I915_CONTEXT_PARAM_SSEU,
+		.size = sizeof(sseu),
+		.value = to_user_pointer(&sseu),
+	};
+	int ret;
+
+	if (__gem_context_get_param(fd, &arg))
+		return false;
+
+	/* NOTE(review): arg.value is already set in the initializer above;
+	 * this reassignment is redundant but harmless. */
+	arg.value = to_user_pointer(&sseu);
+
+	ret = __gem_context_set_param(fd, &arg);
+
+	igt_assert(ret == 0 || ret == -ENODEV || ret == -EINVAL);
+
+	return ret == 0;
+}
+
+/*
+ * Check whether an engine of the given class/instance exists by trying
+ * to open its busyness PMU counter; the fd is closed immediately, only
+ * open success/failure matters.
+ */
+static bool has_engine(int fd, unsigned int class, unsigned int instance)
+{
+	int pmu = perf_i915_open(I915_PMU_ENGINE_BUSY(class, instance));
+
+	if (pmu >= 0)
+		close(pmu);
+
+	return pmu >= 0;
+}
+
+/*
+ * Verify that invalid engines are rejected and valid ones are accepted.
+ */
+static void test_engines(int fd)
+{
+	struct drm_i915_gem_context_param_sseu sseu = { };
+	struct drm_i915_gem_context_param arg = {
+		.param = I915_CONTEXT_PARAM_SSEU,
+		.ctx_id = gem_context_create(fd),
+		.size = sizeof(sseu),
+		.value = to_user_pointer(&sseu)
+	};
+	unsigned int class, instance;
+	int last_with_engines;
+
+	/* get_param */
+
+	sseu.instance = -1; /* Assumed invalid. */
+	igt_assert_eq(__gem_context_get_param(fd, &arg), -EINVAL);
+
+	sseu.class = I915_ENGINE_CLASS_INVALID; /* Both invalid. */
+	igt_assert_eq(__gem_context_get_param(fd, &arg), -EINVAL);
+
+	sseu.instance = 0; /* Class invalid. */
+	igt_assert_eq(__gem_context_get_param(fd, &arg), -EINVAL);
+	sseu.class = I915_ENGINE_CLASS_RENDER; /* Overwritten by the loop below. */
+
+	/*
+	 * Walk the (class, instance) space: every engine the PMU reports
+	 * must be queryable, everything else must be rejected with
+	 * -EINVAL. class/instance are unsigned, so "< ~0" compares
+	 * against UINT_MAX; the hole-skipping breaks keep the scan short.
+	 */
+	last_with_engines = -1;
+	for (class = 0; class < ~0; class++) {
+		for (instance = 0; instance < ~0; instance++) {
+			int ret;
+
+			sseu.class = class;
+			sseu.instance = instance;
+
+			ret = __gem_context_get_param(fd, &arg);
+
+			if (has_engine(fd, class, instance)) {
+				igt_assert_eq(ret, 0);
+				last_with_engines = class;
+			} else {
+				igt_assert_eq(ret, -EINVAL);
+				if (instance > 8) /* Skip over some instance holes. */
+					break;
+			}
+		}
+
+		if (class - last_with_engines > 8) /* Skip over some class holes. */
+			break;
+	}
+
+	/*
+	 * Get some proper values before trying to reprogram them onto
+	 * an invalid engine.
+	 */
+	sseu.class = 0;
+	sseu.instance = 0;
+	gem_context_get_param(fd, &arg);
+
+	/* set_param */
+
+	sseu.instance = -1; /* Assumed invalid. */
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+
+	sseu.class = I915_ENGINE_CLASS_INVALID; /* Both invalid. */
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+
+	sseu.instance = 0; /* Class invalid. */
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+
+	/*
+	 * Same walk for set_param. A valid engine may still return
+	 * -ENODEV when dynamic SSEU is not supported on it.
+	 */
+	last_with_engines = -1;
+	for (class = 0; class < ~0; class++) {
+		for (instance = 0; instance < ~0; instance++) {
+			int ret;
+
+			sseu.class = class;
+			sseu.instance = instance;
+
+			ret = __gem_context_set_param(fd, &arg);
+
+			if (has_engine(fd, class, instance)) {
+				igt_assert(ret == 0 || ret == -ENODEV);
+				last_with_engines = class;
+			} else {
+				igt_assert_eq(ret, -EINVAL);
+				if (instance > 8) /* Skip over some instance holes. */
+					break;
+			}
+		}
+
+		if (class - last_with_engines > 8) /* Skip over some class holes. */
+			break;
+	}
+
+	gem_context_destroy(fd, arg.ctx_id);
+}
+
+/*
+ * Verify that invalid arguments are rejected.
+ */
+static void
+test_invalid_args(int fd)
+{
+	struct drm_i915_gem_context_param arg = {
+		.param = I915_CONTEXT_PARAM_SSEU,
+		.ctx_id = gem_context_create(fd),
+	};
+	struct drm_i915_gem_context_param_sseu sseu = { };
+	unsigned char *page[2];
+	unsigned char *addr;
+	unsigned int sz;
+
+	/* get param */
+
+	/* Invalid size. */
+	arg.size = 1;
+	igt_assert_eq(__gem_context_get_param(fd, &arg), -EINVAL);
+
+	/* Query size. */
+	arg.size = 0;
+	igt_assert_eq(__gem_context_get_param(fd, &arg), 0);
+	sz = arg.size;
+
+	/*
+	 * Bad pointers. Set value explicitly instead of relying on the
+	 * zero from the designated initializer, matching the set_param
+	 * section below.
+	 */
+	arg.value = 0;
+	igt_assert_eq(__gem_context_get_param(fd, &arg), -EFAULT);
+	arg.value = -1;
+	igt_assert_eq(__gem_context_get_param(fd, &arg), -EFAULT);
+	arg.value = 1;
+	igt_assert_eq(__gem_context_get_param(fd, &arg), -EFAULT);
+
+	/* Unmapped. */
+	page[0] = mmap(0, 4096, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+	igt_assert(page[0] != MAP_FAILED);
+	memset(page[0], 0, sizeof(sseu));
+	munmap(page[0], 4096);
+	arg.value = to_user_pointer(page[0]);
+	igt_assert_eq(__gem_context_get_param(fd, &arg), -EFAULT);
+
+	/*
+	 * Straddle into unmapped area. Reserve two pages, then remap only
+	 * the first so the struct crosses a mapped/unmapped boundary.
+	 */
+	page[0] = mmap(0, 8192, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+	igt_assert(page[0] != MAP_FAILED);
+	munmap(page[0], 8192);
+	page[0] = mmap(page[0], 4096,
+		       PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+	igt_assert(page[0] != MAP_FAILED);
+	memset(page[0], 0, sizeof(sseu));
+	page[1] = mmap((void *)((unsigned long)page[0] + 4096), 4096,
+		       PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+	igt_assert(page[1] != MAP_FAILED);
+	memset(page[1], 0, sizeof(sseu));
+	munmap(page[1], 4096);
+	arg.value = to_user_pointer(page[1]) -
+		    sizeof(struct drm_i915_gem_context_param_sseu) + 4;
+	igt_assert_eq(__gem_context_get_param(fd, &arg), -EFAULT);
+	munmap(page[0], 4096);
+
+	/* Straddle into read-only area. */
+	page[0] = mmap(0, 8192, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+	igt_assert(page[0] != MAP_FAILED);
+	munmap(page[0], 8192);
+	page[0] = mmap(page[0], 4096,
+		       PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+	igt_assert(page[0] != MAP_FAILED);
+	memset(page[0], 0, sizeof(sseu));
+	page[1] = mmap((void *)((unsigned long)page[0] + 4096), 4096,
+		       PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+	igt_assert(page[1] != MAP_FAILED);
+	memset(page[1], 0, sizeof(sseu));
+	igt_assert(mprotect(page[1], 4096, PROT_READ) == 0);
+	arg.value = to_user_pointer(page[1] - sizeof(sseu) + 4);
+	igt_assert_eq(__gem_context_get_param(fd, &arg), -EFAULT);
+	munmap(page[0], 4096);
+	munmap(page[1], 4096);
+
+	/* set param */
+
+	/* Invalid sizes. */
+	arg.size = 1;
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+
+	arg.size = 0;
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+	arg.size = sz;
+
+	/* Bad pointers. */
+	arg.value = 0;
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EFAULT);
+	arg.value = -1;
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EFAULT);
+	arg.value = 1;
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EFAULT);
+
+	/* Get valid SSEU. */
+	arg.value = to_user_pointer(&sseu);
+	igt_assert_eq(__gem_context_get_param(fd, &arg), 0);
+
+	/* Invalid MBZ: flags and rsvd must be zero, alone or combined. */
+	sseu.flags = -1;
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+	sseu.rsvd = -1;
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+	sseu.flags = 0;
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+	sseu.rsvd = 0;
+
+	/* Unmapped. */
+	page[0] = mmap(0, 4096, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+	igt_assert(page[0] != MAP_FAILED);
+	memcpy(page[0], &sseu, sizeof(sseu));
+	munmap(page[0], 4096);
+	arg.value = to_user_pointer(page[0]);
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EFAULT);
+
+	/* Straddle into unmapped area. */
+	page[0] = mmap(0, 8192, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+	igt_assert(page[0] != MAP_FAILED);
+	munmap(page[0], 8192);
+	page[0] = mmap(page[0], 4096,
+		       PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+	igt_assert(page[0] != MAP_FAILED);
+	page[1] = mmap((void *)((unsigned long)page[0] + 4096), 4096,
+		       PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+	igt_assert(page[1] != MAP_FAILED);
+	addr = page[1] - sizeof(sseu) + 4;
+	memcpy(addr, &sseu, sizeof(sseu));
+	munmap(page[1], 4096);
+	arg.value = to_user_pointer(addr);
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EFAULT);
+	munmap(page[0], 4096);
+
+	gem_context_destroy(fd, arg.ctx_id);
+}
+
+/*
+ * Verify that ggtt mapped area can be used as the sseu pointer.
+ */
+static void
+test_ggtt_args(int fd)
+{
+	struct drm_i915_gem_context_param_sseu *sseu;
+	struct drm_i915_gem_context_param arg = {
+		.param = I915_CONTEXT_PARAM_SSEU,
+		.ctx_id = gem_context_create(fd),
+		.size = sizeof(*sseu),
+	};
+	uint32_t bo;
+
+	/* Back the uAPI pointer with a GTT mmap of a fresh BO. */
+	bo = gem_create(fd, 4096);
+	arg.value = to_user_pointer(gem_mmap__gtt(fd, bo, 4096,
+						  PROT_READ | PROT_WRITE));
+
+	/* Both get and set must work through the GGTT mapping. */
+	igt_assert_eq(__gem_context_get_param(fd, &arg), 0);
+	igt_assert_eq(__gem_context_set_param(fd, &arg), 0);
+
+	munmap((void *)arg.value, 4096);
+	gem_close(fd, bo);
+	gem_context_destroy(fd, arg.ctx_id);
+}
+
+/*
+ * Verify that invalid SSEU values are rejected.
+ */
+static void
+test_invalid_sseu(int fd)
+{
+	struct drm_i915_gem_context_param_sseu device_sseu = { };
+	struct drm_i915_gem_context_param_sseu sseu = { };
+	struct drm_i915_gem_context_param arg = {
+		.param = I915_CONTEXT_PARAM_SSEU,
+		.ctx_id = gem_context_create(fd),
+		.size = sizeof(sseu),
+	};
+	int i; /* Signed: the "8 - count" loop bounds below are signed. */
+
+	/* Fetch the device defaults. */
+	arg.value = to_user_pointer(&device_sseu);
+	gem_context_get_param(fd, &arg);
+
+	arg.value = to_user_pointer(&sseu);
+
+	/* Try all slice masks known to be invalid. */
+	sseu = device_sseu;
+	for (i = 1; i <= (8 - __slice_count__); i++) {
+		sseu.slice_mask = mask_plus(__slice_mask__, i);
+		igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+	}
+
+	/* 0 slices. */
+	sseu.slice_mask = 0;
+	/* Argument order fixed to match every other assert in this file. */
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+
+	/* Try all subslice masks known to be invalid. */
+	sseu = device_sseu;
+	for (i = 1; i <= (8 - __subslice_count__); i++) {
+		sseu.subslice_mask = mask_plus(__subslice_mask__, i);
+		igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+	}
+
+	/* 0 subslices. */
+	sseu.subslice_mask = 0;
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+
+	/* Try number of EUs superior to the max available. */
+	sseu = device_sseu;
+	sseu.min_eus_per_subslice = device_sseu.max_eus_per_subslice + 1;
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+
+	sseu = device_sseu;
+	sseu.max_eus_per_subslice = device_sseu.max_eus_per_subslice + 1;
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+
+	/* Try to program 0 max EUs. */
+	sseu = device_sseu;
+	sseu.max_eus_per_subslice = 0;
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+
+	/* Min > max */
+	sseu = device_sseu;
+	sseu.min_eus_per_subslice = sseu.max_eus_per_subslice;
+	sseu.max_eus_per_subslice = 1;
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+
+	/* The remaining restrictions are Gen11-specific. */
+	if (__intel_gen__ != 11)
+		goto out;
+
+	/* Subset of subslices but slice mask greater than one. */
+	if (__slice_count__ > 1) {
+		sseu = device_sseu;
+		sseu.subslice_mask = mask_minus_one(sseu.subslice_mask);
+		igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+	}
+
+	/* Odd subslices above four. */
+	sseu = device_sseu;
+	sseu.slice_mask = 0x1;
+	sseu.subslice_mask = mask_minus_one(sseu.subslice_mask);
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+
+	/* More than half subslices with one slice. */
+	sseu = device_sseu;
+	sseu.slice_mask = 0x1;
+	sseu.subslice_mask = mask_minus(sseu.subslice_mask,
+					__subslice_count__ / 2 - 1);
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+
+	/* VME */
+
+	/* Slice count between one and max. */
+	if (__slice_count__ > 2) {
+		sseu = device_sseu;
+		sseu.slice_mask = mask_minus_one(sseu.slice_mask);
+		igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+	}
+
+	/* Less than half subslices with one slice. */
+	sseu = device_sseu;
+	sseu.slice_mask = 0x1;
+	sseu.subslice_mask = mask_minus(sseu.subslice_mask,
+					__subslice_count__ / 2 + 1);
+	igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
+
+out:
+	gem_context_destroy(fd, arg.ctx_id);
+}
+
+igt_main
+{
+	int fd;
+
+	igt_fixture {
+		fd = drm_open_driver(DRIVER_INTEL);
+		igt_require_gem(fd);
+
+		__intel_devid__ = intel_get_drm_devid(fd);
+		__intel_gen__ = intel_gen(__intel_devid__);
+
+		/* Skip the whole binary when the SSEU uAPI is absent. */
+		igt_require(kernel_has_per_context_sseu_support(fd));
+	}
+
+	igt_subtest_group {
+		igt_fixture {
+			drm_i915_getparam_t gp;
+
+			/*
+			 * NOTE(review): gp.value is an int *, so GETPARAM
+			 * fills only the low 32 bits of these u64 masks;
+			 * relies on the globals being zero-initialised.
+			 */
+			gp.param = I915_PARAM_SLICE_MASK;
+			gp.value = (int *) &__slice_mask__;
+			do_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+			__slice_count__ = __builtin_popcount(__slice_mask__);
+
+			gp.param = I915_PARAM_SUBSLICE_MASK;
+			gp.value = (int *) &__subslice_mask__;
+			do_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+			__subslice_count__ =
+				__builtin_popcount(__subslice_mask__);
+		}
+
+		igt_subtest("invalid-args")
+			test_invalid_args(fd);
+
+		igt_subtest("invalid-sseu")
+			test_invalid_sseu(fd);
+
+		igt_subtest("ggtt-args")
+			test_ggtt_args(fd);
+
+		igt_subtest("engines")
+			test_engines(fd);
+	}
+
+	igt_fixture {
+		close(fd);
+	}
+}
diff --git a/tests/meson.build b/tests/meson.build
index b8a6e61b3404..f41f724af8af 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -288,6 +288,14 @@  foreach prog : gem_progs + gen3_progs
 	test_list += prog
 endforeach
 
+test_executables += executable('gem_ctx_sseu',
+	   join_paths('i915', 'gem_ctx_sseu.c'),
+	   dependencies : test_deps + [ lib_igt_perf ],
+	   install_dir : libexecdir,
+	   install_rpath : libexecdir_rpathdir,
+	   install : true)
+test_progs += 'gem_ctx_sseu'
+
 test_executables += executable('gem_eio',
 	   join_paths('i915', 'gem_eio.c'),
 	   dependencies : test_deps + [ realtime ],