[v2] igt/gem_exec_schedule: Exercise "deep" preemption

Submitted by Chris Wilson on Feb. 26, 2018, 3:26 p.m.

Details

Message ID 20180226152658.15609-1-chris@chris-wilson.co.uk
State New
Series "igt/gem_exec_schedule: Exercise "deep" preemption"

Commit Message

Chris Wilson Feb. 26, 2018, 3:26 p.m.
In investigating the issue with having to force preemption within the
executing ELSP[], we want to trigger preemption between all elements of
that array. To that end, we issue a series of requests with different
priorities to fill the in-flight ELSP[] and then demand preemption into
the middle of that series. One can think of even more complicated
reordering requirements of ELSP[], trying to switch between every
possible combination of permutations. Rather than check all 2 billion
combinations, be content with a few.

v2: Add a different pattern for queued requests. Not only do we need to
inject a request into the middle of a single context with a queue of
different priority contexts, but we also want a queue of different
contexts, as they have different patterns of ELSP[] behaviour.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
---
 tests/gem_exec_schedule.c | 188 +++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 169 insertions(+), 19 deletions(-)
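
For orientation, a minimal sketch of the priority ladder that the new __preempt_queue() builds around the high-priority probe. It is illustrative only: the wrapper name is made up, while __noise(), MAX_PRIO and MAX_ELSP_QLEN come from the patch below (the CONTEXTS flag additionally swaps in a fresh noise context per step).

static void ladder_sketch(int fd, unsigned int depth)
{
	igt_spin_t *above = NULL, *below = NULL;
	uint32_t noise = gem_context_create(fd);
	uint32_t hi = gem_context_create(fd);
	uint32_t lo = gem_context_create(fd);
	int prio = MAX_PRIO;
	unsigned int n;

	/* Noise that outranks the probe: it stays resident in ELSP[]
	 * ahead of the probe, forcing the preemption point into the
	 * middle of the in-flight array. */
	for (n = 0; n < depth; n++)
		above = __noise(fd, noise, prio--, above);

	/* The probe sits immediately below the "above" noise... */
	gem_context_set_priority(fd, hi, prio--);

	/* ...and above the remaining noise, which it must preempt past. */
	for (; n < MAX_ELSP_QLEN; n++)
		below = __noise(fd, noise, prio--, below);

	/* The baseline store_dword() requests run at the lowest priority. */
	gem_context_set_priority(fd, lo, prio--);

	/*
	 * __preempt_queue() then submits the LO stores (on every engine
	 * when CHAIN is set), the HI store, releases the "above" spinners
	 * so the probe may complete, verifies the stores landed in order,
	 * and checks the "below" spinners are still busy, i.e. the probe
	 * preempted past them instead of queueing behind them.
	 */

	if (above)
		igt_spin_batch_free(fd, above);
	if (below)
		igt_spin_batch_free(fd, below);

	gem_context_destroy(fd, lo);
	gem_context_destroy(fd, hi);
	gem_context_destroy(fd, noise);
}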

diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
index 8a69ab5c..7f1bda42 100644
--- a/tests/gem_exec_schedule.c
+++ b/tests/gem_exec_schedule.c
@@ -373,13 +373,78 @@  static void preempt(int fd, unsigned ring, unsigned flags)
 	gem_close(fd, result);
 }
 
-static void preempt_other(int fd, unsigned ring)
+#define CHAIN 0x1
+#define CONTEXTS 0x2
+
+static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
+{
+	unsigned other;
+
+	gem_context_set_priority(fd, ctx, prio);
+
+	for_each_physical_engine(fd, other) {
+		if (spin == NULL) {
+			spin = __igt_spin_batch_new(fd, ctx, other, 0);
+		} else {
+			struct drm_i915_gem_exec_object2 obj = {
+				.handle = spin->handle,
+			};
+			struct drm_i915_gem_execbuffer2 eb = {
+				.buffer_count = 1,
+				.buffers_ptr = to_user_pointer(&obj),
+				.rsvd1 = ctx,
+				.flags = other,
+			};
+			gem_execbuf(fd, &eb);
+		}
+	}
+
+	return spin;
+}
+
+static void __preempt_other(int fd,
+			    uint32_t *ctx,
+			    unsigned int target, unsigned int primary,
+			    unsigned flags)
 {
 	uint32_t result = gem_create(fd, 4096);
 	uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
-	igt_spin_t *spin[MAX_ENGINES];
-	unsigned int other;
-	unsigned int n, i;
+	unsigned int n, i, other;
+
+	n = 0;
+	store_dword(fd, ctx[LO], primary,
+		    result, (n + 1)*sizeof(uint32_t), n + 1,
+		    0, I915_GEM_DOMAIN_RENDER);
+	n++;
+
+	if (flags & CHAIN) {
+		for_each_physical_engine(fd, other) {
+			store_dword(fd, ctx[LO], other,
+				    result, (n + 1)*sizeof(uint32_t), n + 1,
+				    0, I915_GEM_DOMAIN_RENDER);
+			n++;
+		}
+	}
+
+	store_dword(fd, ctx[HI], target,
+		    result, (n + 1)*sizeof(uint32_t), n + 1,
+		    0, I915_GEM_DOMAIN_RENDER);
+
+	igt_debugfs_dump(fd, "i915_engine_info");
+	gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
+
+	n++;
+	for (i = 0; i <= n; i++)
+		igt_assert_eq_u32(ptr[i], i);
+
+	munmap(ptr, 4096);
+	gem_close(fd, result);
+}
+
+static void preempt_other(int fd, unsigned ring, unsigned int flags)
+{
+	unsigned int primary;
+	igt_spin_t *spin = NULL;
 	uint32_t ctx[3];
 
 	/* On each engine, insert
@@ -396,36 +461,97 @@  static void preempt_other(int fd, unsigned ring)
 	gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
 
 	ctx[NOISE] = gem_context_create(fd);
+	spin = __noise(fd, ctx[NOISE], 0, NULL);
 
 	ctx[HI] = gem_context_create(fd);
 	gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
 
+	for_each_physical_engine(fd, primary) {
+		igt_debug("Primary engine: %s\n", e__->name);
+		__preempt_other(fd, ctx, ring, primary, flags);
+
+	}
+
+	igt_assert(gem_bo_busy(fd, spin->handle));
+	igt_spin_batch_free(fd, spin);
+
+	gem_context_destroy(fd, ctx[LO]);
+	gem_context_destroy(fd, ctx[NOISE]);
+	gem_context_destroy(fd, ctx[HI]);
+}
+
+static void __preempt_queue(int fd,
+			    unsigned target, unsigned primary,
+			    unsigned depth, unsigned flags)
+{
+	uint32_t result = gem_create(fd, 4096);
+	uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
+	igt_spin_t *above = NULL, *below = NULL;
+	unsigned int other, n, i;
+	int prio = MAX_PRIO;
+	uint32_t ctx[3] = {
+		gem_context_create(fd),
+		gem_context_create(fd),
+		gem_context_create(fd),
+	};
+
+	for (n = 0; n < depth; n++) {
+		if (flags & CONTEXTS) {
+			gem_context_destroy(fd, ctx[NOISE]);
+			ctx[NOISE] = gem_context_create(fd);
+		}
+		above = __noise(fd, ctx[NOISE], prio--, above);
+	}
+
+	gem_context_set_priority(fd, ctx[HI], prio--);
+
+	for (; n < MAX_ELSP_QLEN; n++) {
+		if (flags & CONTEXTS) {
+			gem_context_destroy(fd, ctx[NOISE]);
+			ctx[NOISE] = gem_context_create(fd);
+		}
+		below = __noise(fd, ctx[NOISE], prio--, below);
+	}
+
+	gem_context_set_priority(fd, ctx[LO], prio--);
+
 	n = 0;
-	for_each_physical_engine(fd, other) {
-		igt_assert(n < ARRAY_SIZE(spin));
+	store_dword(fd, ctx[LO], primary,
+		    result, (n + 1)*sizeof(uint32_t), n + 1,
+		    0, I915_GEM_DOMAIN_RENDER);
+	n++;
 
-		spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
-		store_dword(fd, ctx[LO], other,
-			    result, (n + 1)*sizeof(uint32_t), n + 1,
-			    0, I915_GEM_DOMAIN_RENDER);
-		n++;
+	if (flags & CHAIN) {
+		for_each_physical_engine(fd, other) {
+			store_dword(fd, ctx[LO], other,
+				    result, (n + 1)*sizeof(uint32_t), n + 1,
+				    0, I915_GEM_DOMAIN_RENDER);
+			n++;
+		}
 	}
-	store_dword(fd, ctx[HI], ring,
+
+	store_dword(fd, ctx[HI], target,
 		    result, (n + 1)*sizeof(uint32_t), n + 1,
 		    0, I915_GEM_DOMAIN_RENDER);
 
 	igt_debugfs_dump(fd, "i915_engine_info");
-	gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
 
-	for (i = 0; i < n; i++) {
-		igt_assert(gem_bo_busy(fd, spin[i]->handle));
-		igt_spin_batch_free(fd, spin[i]);
+	if (above) {
+		igt_assert(gem_bo_busy(fd, above->handle));
+		igt_spin_batch_free(fd, above);
 	}
 
+	gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
+
 	n++;
 	for (i = 0; i <= n; i++)
 		igt_assert_eq_u32(ptr[i], i);
 
+	if (below) {
+		igt_assert(gem_bo_busy(fd, below->handle));
+		igt_spin_batch_free(fd, below);
+	}
+
 	gem_context_destroy(fd, ctx[LO]);
 	gem_context_destroy(fd, ctx[NOISE]);
 	gem_context_destroy(fd, ctx[HI]);
@@ -434,6 +560,16 @@  static void preempt_other(int fd, unsigned ring)
 	gem_close(fd, result);
 }
 
+static void preempt_queue(int fd, unsigned ring, unsigned int flags)
+{
+	unsigned other;
+
+	for_each_physical_engine(fd, other) {
+		for (unsigned depth = 0; depth <= MAX_ELSP_QLEN; depth++)
+			__preempt_queue(fd, ring, other, depth, flags);
+	}
+}
+
 static void preempt_self(int fd, unsigned ring)
 {
 	uint32_t result = gem_create(fd, 4096);
@@ -981,12 +1117,26 @@  igt_main
 					igt_subtest_f("preempt-contexts-%s", e->name)
 						preempt(fd, e->exec_id | e->flags, NEW_CTX);
 
-					igt_subtest_f("preempt-other-%s", e->name)
-						preempt_other(fd, e->exec_id | e->flags);
-
 					igt_subtest_f("preempt-self-%s", e->name)
 						preempt_self(fd, e->exec_id | e->flags);
 
+					igt_subtest_f("preempt-other-%s", e->name)
+						preempt_other(fd, e->exec_id | e->flags, 0);
+
+					igt_subtest_f("preempt-other-chain-%s", e->name)
+						preempt_other(fd, e->exec_id | e->flags, CHAIN);
+
+					igt_subtest_f("preempt-queue-%s", e->name)
+						preempt_queue(fd, e->exec_id | e->flags, 0);
+
+					igt_subtest_f("preempt-queue-chain-%s", e->name)
+						preempt_queue(fd, e->exec_id | e->flags, CHAIN);
+					igt_subtest_f("preempt-contexts-%s", e->name)
+						preempt_queue(fd, e->exec_id | e->flags, CONTEXTS);
+
+					igt_subtest_f("preempt-contexts-chain-%s", e->name)
+						preempt_queue(fd, e->exec_id | e->flags, CONTEXTS | CHAIN);
+
 					igt_subtest_group {
 						igt_hang_t hang;
 

Comments

Chris Wilson Feb. 27, 2018, 10:44 a.m.
Quoting Chris Wilson (2018-02-26 15:26:58)
> In investigating the issue with having to force preemption within the
> executing ELSP[], we want to trigger preemption between all elements of
> that array. To that end, we issue a series of requests with different
> priorities to fill the in-flight ELSP[] and then demand preemption into
> the middle of that series. One can think of even more complicated
> reordering requirements of ELSP[], trying to switch between every
> possible combination of permutations. Rather than check all 2 billion
> combinations, be content with a few.
> 
> v2: Add a different pattern for queued requests. Not only do we need to
> inject a request into the middle of a single context with a queue of
> different priority contexts, but we also want a queue of different
> contexts, as they have different patterns of ELSP[] behaviour.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> Cc: Michał Winiarski <michal.winiarski@intel.com>

Just nod. :)

Don't mention fMBT.
-Chris

Mika Kuoppala Feb. 27, 2018, 1:25 p.m.
Chris Wilson <chris@chris-wilson.co.uk> writes:

> @@ -981,12 +1117,26 @@ igt_main
>  					igt_subtest_f("preempt-contexts-%s", e->name)
>  						preempt(fd, e->exec_id | e->flags, NEW_CTX);
>  
> -					igt_subtest_f("preempt-other-%s", e->name)
> -						preempt_other(fd, e->exec_id | e->flags);
> -
>  					igt_subtest_f("preempt-self-%s", e->name)
>  						preempt_self(fd, e->exec_id | e->flags);
>  
> +					igt_subtest_f("preempt-other-%s", e->name)
> +						preempt_other(fd, e->exec_id | e->flags, 0);
> +
> +					igt_subtest_f("preempt-other-chain-%s", e->name)
> +						preempt_other(fd, e->exec_id | e->flags, CHAIN);
> +
> +					igt_subtest_f("preempt-queue-%s", e->name)
> +						preempt_queue(fd, e->exec_id | e->flags, 0);
> +
> +					igt_subtest_f("preempt-queue-chain-%s", e->name)
> +						preempt_queue(fd, e->exec_id | e->flags, CHAIN);
> +					igt_subtest_f("preempt-contexts-%s", e->name)

This will collide with the same name as the test above:
preempt(fd, e->exec_id | e->flags, NEW_CTX);

-Mika

> +						preempt_queue(fd, e->exec_id | e->flags, CONTEXTS);
> +
> +					igt_subtest_f("preempt-contexts-chain-%s", e->name)
> +						preempt_queue(fd, e->exec_id | e->flags, CONTEXTS | CHAIN);
> +
>  					igt_subtest_group {
>  						igt_hang_t hang;
>  
Chris Wilson Feb. 27, 2018, 1:41 p.m.
Quoting Mika Kuoppala (2018-02-27 13:25:35)
> >       uint32_t result = gem_create(fd, 4096);
> > @@ -981,12 +1117,26 @@ igt_main
> >                                       igt_subtest_f("preempt-contexts-%s", e->name)
> >                                               preempt(fd, e->exec_id | e->flags, NEW_CTX);
> >  
> > -                                     igt_subtest_f("preempt-other-%s", e->name)
> > -                                             preempt_other(fd, e->exec_id | e->flags);
> > -
> >                                       igt_subtest_f("preempt-self-%s", e->name)
> >                                               preempt_self(fd, e->exec_id | e->flags);
> >  
> > +                                     igt_subtest_f("preempt-other-%s", e->name)
> > +                                             preempt_other(fd, e->exec_id | e->flags, 0);
> > +
> > +                                     igt_subtest_f("preempt-other-chain-%s", e->name)
> > +                                             preempt_other(fd, e->exec_id | e->flags, CHAIN);
> > +
> > +                                     igt_subtest_f("preempt-queue-%s", e->name)
> > +                                             preempt_queue(fd, e->exec_id | e->flags, 0);
> > +
> > +                                     igt_subtest_f("preempt-queue-chain-%s", e->name)
> > +                                             preempt_queue(fd, e->exec_id | e->flags, CHAIN);
> > +                                     igt_subtest_f("preempt-contexts-%s", e->name)
> 
> This will collide with the same name as the test above:
> preempt(fd, e->exec_id | e->flags, NEW_CTX);

Imagine it as preempt-queue-contexts-* then. Thanks,
-Chris
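
The rename would then look something like this (an illustrative hunk, not one posted in this thread):

	igt_subtest_f("preempt-queue-contexts-%s", e->name)
		preempt_queue(fd, e->exec_id | e->flags, CONTEXTS);

	igt_subtest_f("preempt-queue-contexts-chain-%s", e->name)
		preempt_queue(fd, e->exec_id | e->flags, CONTEXTS | CHAIN);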