[i-g-t] i915/perf_pmu: Verify that waiters do not report as busy

Submitted by Chris Wilson on Feb. 4, 2019, 5:17 p.m.

Details

Message ID 20190204171704.14291-1-chris@chris-wilson.co.uk
State New
Series "i915/perf_pmu: Verify that waiters do not report as busy"

Commit Message

If we queue requests that must wait for the current spinner to finish,
those waiting engines should not be reported as busy (otherwise we
perturb the scheduling logic that tries to distribute workloads onto
idle engines; if every engine reports as busy, even those merely
waiting, scheduling degrades into a round-robin free-for-all).
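
For reference (not part of the patch itself), the metric under test is
the i915 PMU's per-engine busy counter, exposed through the kernel's
perf interface. Below is a minimal standalone sketch of sampling it for
one engine; the sysfs path is the standard one for dynamic PMUs, while
the choice of rcs0 and the one-second window are illustrative
assumptions:

/*
 * Standalone sketch: sample one engine's busy time via the i915 PMU.
 * Builds against the uapi/libdrm headers; rcs0 and the 1s window are
 * illustrative, not taken from the patch.
 */
#include <drm/i915_drm.h>
#include <inttypes.h>
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int i915_pmu_type(void)
{
	/* The kernel publishes the dynamic PMU's type id in sysfs. */
	FILE *f = fopen("/sys/bus/event_source/devices/i915/type", "r");
	int type = -1;

	if (f) {
		if (fscanf(f, "%d", &type) != 1)
			type = -1;
		fclose(f);
	}

	return type;
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t busy_ns;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = i915_pmu_type();
	/* Busy time, in nanoseconds, of render engine 0 (rcs0). */
	attr.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0);

	/* i915 is an uncore PMU: no pid, counter pinned to one CPU. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	read(fd, &busy_ns, sizeof(busy_ns));
	printf("engine busy: %" PRIu64 " ns\n", busy_ns);
	close(fd);

	return 0;
}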

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 tests/i915/perf_pmu.c | 64 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)


diff --git a/tests/i915/perf_pmu.c b/tests/i915/perf_pmu.c
index 755e1b9ee..aa2583205 100644
--- a/tests/i915/perf_pmu.c
+++ b/tests/i915/perf_pmu.c
@@ -541,6 +541,67 @@  most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 	gem_quiescent_gpu(gem_fd);
 }
 
+static void
+busy_wait_check_all(int gem_fd,
+		    const struct intel_execution_engine2 *e,
+		    const unsigned int num_engines)
+{
+	const struct intel_execution_engine2 *e_;
+	uint64_t tval[2][num_engines];
+	uint64_t val[num_engines];
+	int fd[num_engines];
+	unsigned long slept;
+	igt_spin_t *spin;
+	unsigned int busy_idx, i;
+
+	/*
+	 * One engine busy spins while all others wait on the spinner, with
+	 * the expected result that only the active spinner reports as busy.
+	 */
+
+	spin = __spin_poll(gem_fd, 0, e2ring(gem_fd, e));
+	spin->obj[0].flags |= EXEC_OBJECT_WRITE;
+
+	i = 0;
+	for_each_engine_class_instance(gem_fd, e_) {
+		if (e == e_)
+			busy_idx = i;
+		else
+			__submit_spin_batch(gem_fd, spin, e_, 64);
+
+		val[i++] = I915_PMU_ENGINE_BUSY(e_->class, e_->instance);
+	}
+	igt_assert(i == num_engines);
+
+	fd[0] = -1;
+	for (i = 0; i < num_engines; i++)
+		fd[i] = open_group(val[i], fd[0]);
+
+	/* Small delay to allow engines to start. */
+	usleep(__spin_wait(gem_fd, spin) * num_engines / 1e3);
+
+	pmu_read_multi(fd[0], num_engines, tval[0]);
+	slept = measured_usleep(batch_duration_ns / 1000);
+	pmu_read_multi(fd[0], num_engines, tval[1]);
+
+	end_spin(gem_fd, spin, FLAG_SYNC);
+	igt_spin_batch_free(gem_fd, spin);
+	close(fd[0]);
+
+	for (i = 0; i < num_engines; i++)
+		val[i] = tval[1][i] - tval[0][i];
+
+	log_busy(num_engines, val);
+
+	for (i = 0; i < num_engines; i++) {
+		if (i == busy_idx)
+			assert_within_epsilon(val[i], slept, tolerance);
+		else
+			assert_within_epsilon(val[i], 0.0f, tolerance);
+	}
+	gem_quiescent_gpu(gem_fd);
+}
+
 static void
 all_busy_check_all(int gem_fd, const unsigned int num_engines,
 		   unsigned int flags)
@@ -1742,6 +1803,9 @@  igt_main
 				busy_check_all(fd, e, num_engines,
 					       TEST_BUSY | TEST_TRAILING_IDLE);
 
+			igt_subtest_f("busy-wait-check-all-%s", e->name)
+				busy_wait_check_all(fd, e, num_engines);
+
 			/**
 			 * Test that when all except one engine are loaded all
 			 * loads are correctly reported.
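
For context on the open_group()/pmu_read_multi() pattern above: the
per-engine events are opened as a single perf event group on one leader
fd, so every engine is sampled atomically in a single read(). A sketch
of how such a grouped read decodes, assuming the group was opened with
read_format = PERF_FORMAT_GROUP and no other read-format bits
(read_group() here is an illustrative helper, not the IGT one):

/*
 * With PERF_FORMAT_GROUP, one read() on the group leader returns every
 * counter in the group atomically, laid out as:
 *	struct { u64 nr; u64 values[nr]; }
 * Illustrative helper; assumes no other read_format bits are set.
 */
#include <stdint.h>
#include <unistd.h>

static int read_group(int leader_fd, unsigned int nr_events, uint64_t *out)
{
	uint64_t buf[1 + nr_events];

	if (read(leader_fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		return -1;
	if (buf[0] != nr_events) /* number of counters actually returned */
		return -1;

	for (unsigned int i = 0; i < nr_events; i++)
		out[i] = buf[1 + i]; /* values follow group-creation order */

	return 0;
}

Sampling all engines in one atomic read is what lets the test compare
the spinning engine against the waiters over exactly the same interval.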