[v2] tests/amdgpu: add deadlock test for sdma

Submitted by Cui, Flora on March 6, 2019, 6:37 a.m.

Details

Message ID 1551854219-4458-1-git-send-email-flora.cui@amd.com
State New
Headers show
Series "tests/amdgpu: add deadlock test for sdma" ( rev: 2 ) in AMD X.Org drivers

Not browsing as part of any series.

Commit Message

Cui, Flora March 6, 2019, 6:37 a.m.
deadlock test for sdma will cause gpu recovery.
disable the test for now until GPU reset recovery can survive at least
1000 iterations of the test.

v2: add modprobe parameter

Change-Id: I9adac63c62db22107345eddb30e7d81a1bda838c
Signed-off-by: Flora Cui <flora.cui@amd.com>
---
 tests/amdgpu/amdgpu_test.c    |   4 ++
 tests/amdgpu/deadlock_tests.c | 103 +++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 106 insertions(+), 1 deletion(-)

Patch hide | download patch | download mbox

diff --git a/tests/amdgpu/amdgpu_test.c b/tests/amdgpu/amdgpu_test.c
index ebf4409..38b8a68 100644
--- a/tests/amdgpu/amdgpu_test.c
+++ b/tests/amdgpu/amdgpu_test.c
@@ -426,6 +426,10 @@  static void amdgpu_disable_suites()
 			"compute ring block test (set amdgpu.lockup_timeout=50)", CU_FALSE))
 		fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
 
+	if (amdgpu_set_test_active(DEADLOCK_TESTS_STR,
+				"sdma ring block test (set amdgpu.lockup_timeout=50)", CU_FALSE))
+		fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
+
 	if (amdgpu_set_test_active(BO_TESTS_STR, "Metadata", CU_FALSE))
 		fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
 
diff --git a/tests/amdgpu/deadlock_tests.c b/tests/amdgpu/deadlock_tests.c
index a6c2635..91368c1 100644
--- a/tests/amdgpu/deadlock_tests.c
+++ b/tests/amdgpu/deadlock_tests.c
@@ -96,6 +96,9 @@ 
 
 #define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR                                      0x54f
 
+#define SDMA_PKT_HEADER_OP(x)	(x & 0xff)
+#define SDMA_OP_POLL_REGMEM  8
+
 static  amdgpu_device_handle device_handle;
 static  uint32_t  major_version;
 static  uint32_t  minor_version;
@@ -110,6 +113,7 @@  static void amdgpu_deadlock_gfx(void);
 static void amdgpu_deadlock_compute(void);
 static void amdgpu_illegal_reg_access();
 static void amdgpu_illegal_mem_access();
+static void amdgpu_deadlock_sdma(void);
 
 CU_BOOL suite_deadlock_tests_enable(void)
 {
@@ -171,6 +175,7 @@  int suite_deadlock_tests_clean(void)
 CU_TestInfo deadlock_tests[] = {
 	{ "gfx ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_gfx },
 	{ "compute ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_compute },
+	{ "sdma ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_sdma },
 	{ "illegal reg access test", amdgpu_illegal_reg_access },
 	{ "illegal mem access test (set amdgpu.vm_fault_stop=2)", amdgpu_illegal_mem_access },
 	CU_TEST_INFO_NULL,
@@ -260,7 +265,6 @@  static void amdgpu_deadlock_helper(unsigned ip_type)
 	ibs_request.ibs = &ib_info;
 	ibs_request.resources = bo_list;
 	ibs_request.fence_info.handle = NULL;
-
 	for (i = 0; i < 200; i++) {
 		r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);
 		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
@@ -291,6 +295,103 @@  static void amdgpu_deadlock_helper(unsigned ip_type)
 	CU_ASSERT_EQUAL(r, 0);
 }
 
+static void amdgpu_deadlock_sdma(void)
+{
+	amdgpu_context_handle context_handle;
+	amdgpu_bo_handle ib_result_handle;
+	void *ib_result_cpu;
+	uint64_t ib_result_mc_address;
+	struct amdgpu_cs_request ibs_request;
+	struct amdgpu_cs_ib_info ib_info;
+	struct amdgpu_cs_fence fence_status;
+	uint32_t expired;
+	int i, r;
+	amdgpu_bo_list_handle bo_list;
+	amdgpu_va_handle va_handle;
+	struct drm_amdgpu_info_hw_ip info;
+	uint32_t ring_id;
+
+	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, &info);
+	CU_ASSERT_EQUAL(r, 0);
+
+	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
+	CU_ASSERT_EQUAL(r, 0);
+
+	for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
+		r = pthread_create(&stress_thread, NULL, write_mem_address, NULL);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,
+				AMDGPU_GEM_DOMAIN_GTT, 0, use_uc_mtype ? AMDGPU_VM_MTYPE_UC : 0,
+							    &ib_result_handle, &ib_result_cpu,
+							    &ib_result_mc_address, &va_handle);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
+				       &bo_list);
+		CU_ASSERT_EQUAL(r, 0);
+
+		ptr = ib_result_cpu;
+		i = 0;
+
+		ptr[i++] = SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+				(0 << 26) | /* WAIT_REG_MEM */
+				(4 << 28) | /* != */
+				(1 << 31); /* memory */
+		ptr[i++] = (ib_result_mc_address + 256*4) & 0xfffffffc;
+		ptr[i++] = ((ib_result_mc_address + 256*4) >> 32) & 0xffffffff;
+		ptr[i++] = 0x00000000; /* reference value */
+		ptr[i++] = 0xffffffff; /* and mask */
+		ptr[i++] =  4 | /* poll interval */
+				(0xfff << 16); /* retry count */
+
+		for (; i < 16; i++)
+			ptr[i] = 0;
+
+		ptr[256] = 0x0; /* the memory we wait on to change */
+
+		memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
+		ib_info.ib_mc_address = ib_result_mc_address;
+		ib_info.size = 16;
+
+		memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
+		ibs_request.ip_type = AMDGPU_HW_IP_DMA;
+		ibs_request.ring = ring_id;
+		ibs_request.number_of_ibs = 1;
+		ibs_request.ibs = &ib_info;
+		ibs_request.resources = bo_list;
+		ibs_request.fence_info.handle = NULL;
+
+		for (i = 0; i < 200; i++) {
+			r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);
+			CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
+
+		}
+
+		memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
+		fence_status.context = context_handle;
+		fence_status.ip_type = AMDGPU_HW_IP_DMA;
+		fence_status.ip_instance = 0;
+		fence_status.ring = ring_id;
+		fence_status.fence = ibs_request.seq_no;
+
+		r = amdgpu_cs_query_fence_status(&fence_status,
+				AMDGPU_TIMEOUT_INFINITE,0, &expired);
+		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
+
+		pthread_join(stress_thread, NULL);
+
+		r = amdgpu_bo_list_destroy(bo_list);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
+					     ib_result_mc_address, 4096);
+		CU_ASSERT_EQUAL(r, 0);
+	}
+	r = amdgpu_cs_ctx_free(context_handle);
+	CU_ASSERT_EQUAL(r, 0);
+}
+
 static void bad_access_helper(int reg_access)
 {
 	amdgpu_context_handle context_handle;

Comments

Reviewed-and-tested-by: Evan Quan <evan.quan@amd.com>

> -----Original Message-----

> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of Cui,

> Flora

> Sent: Wednesday, March 06, 2019 2:37 PM

> To: amd-gfx@lists.freedesktop.org; dri-devel@lists.freedesktop.org

> Cc: Cui, Flora <Flora.Cui@amd.com>

> Subject: [PATCH v2] tests/amdgpu: add deadlock test for sdma

> 

> deadlock test for sdma will cause gpu recoverty.

> disable the test for now until GPU reset recovery could survive at least

> 1000 times test.

> 

> v2: add modprobe parameter

> 

> Change-Id: I9adac63c62db22107345eddb30e7d81a1bda838c

> Signed-off-by: Flora Cui <flora.cui@amd.com>

> ---

>  tests/amdgpu/amdgpu_test.c    |   4 ++

>  tests/amdgpu/deadlock_tests.c | 103

> +++++++++++++++++++++++++++++++++++++++++-

>  2 files changed, 106 insertions(+), 1 deletion(-)

> 

> diff --git a/tests/amdgpu/amdgpu_test.c b/tests/amdgpu/amdgpu_test.c

> index ebf4409..38b8a68 100644

> --- a/tests/amdgpu/amdgpu_test.c

> +++ b/tests/amdgpu/amdgpu_test.c

> @@ -426,6 +426,10 @@ static void amdgpu_disable_suites()

>  			"compute ring block test (set

> amdgpu.lockup_timeout=50)", CU_FALSE))

>  		fprintf(stderr, "test deactivation failed - %s\n",

> CU_get_error_msg());

> 

> +	if (amdgpu_set_test_active(DEADLOCK_TESTS_STR,

> +				"sdma ring block test (set

> amdgpu.lockup_timeout=50)", CU_FALSE))

> +		fprintf(stderr, "test deactivation failed - %s\n",

> +CU_get_error_msg());

> +

>  	if (amdgpu_set_test_active(BO_TESTS_STR, "Metadata", CU_FALSE))

>  		fprintf(stderr, "test deactivation failed - %s\n",

> CU_get_error_msg());

> 

> diff --git a/tests/amdgpu/deadlock_tests.c

> b/tests/amdgpu/deadlock_tests.c index a6c2635..91368c1 100644

> --- a/tests/amdgpu/deadlock_tests.c

> +++ b/tests/amdgpu/deadlock_tests.c

> @@ -96,6 +96,9 @@

> 

>  #define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR

> 0x54f

> 

> +#define SDMA_PKT_HEADER_OP(x)	(x & 0xff)

> +#define SDMA_OP_POLL_REGMEM  8

> +

>  static  amdgpu_device_handle device_handle;  static  uint32_t

> major_version;  static  uint32_t  minor_version; @@ -110,6 +113,7 @@ static

> void amdgpu_deadlock_gfx(void);  static void

> amdgpu_deadlock_compute(void);  static void amdgpu_illegal_reg_access();

> static void amdgpu_illegal_mem_access();

> +static void amdgpu_deadlock_sdma(void);

> 

>  CU_BOOL suite_deadlock_tests_enable(void)  { @@ -171,6 +175,7 @@ int

> suite_deadlock_tests_clean(void)  CU_TestInfo deadlock_tests[] = {

>  	{ "gfx ring block test (set amdgpu.lockup_timeout=50)",

> amdgpu_deadlock_gfx },

>  	{ "compute ring block test (set amdgpu.lockup_timeout=50)",

> amdgpu_deadlock_compute },

> +	{ "sdma ring block test (set amdgpu.lockup_timeout=50)",

> +amdgpu_deadlock_sdma },

>  	{ "illegal reg access test", amdgpu_illegal_reg_access },

>  	{ "illegal mem access test (set amdgpu.vm_fault_stop=2)",

> amdgpu_illegal_mem_access },

>  	CU_TEST_INFO_NULL,

> @@ -260,7 +265,6 @@ static void amdgpu_deadlock_helper(unsigned

> ip_type)

>  	ibs_request.ibs = &ib_info;

>  	ibs_request.resources = bo_list;

>  	ibs_request.fence_info.handle = NULL;

> -

>  	for (i = 0; i < 200; i++) {

>  		r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);

>  		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1); @@ -

> 291,6 +295,103 @@ static void amdgpu_deadlock_helper(unsigned ip_type)

>  	CU_ASSERT_EQUAL(r, 0);

>  }

> 

> +static void amdgpu_deadlock_sdma(void)

> +{

> +	amdgpu_context_handle context_handle;

> +	amdgpu_bo_handle ib_result_handle;

> +	void *ib_result_cpu;

> +	uint64_t ib_result_mc_address;

> +	struct amdgpu_cs_request ibs_request;

> +	struct amdgpu_cs_ib_info ib_info;

> +	struct amdgpu_cs_fence fence_status;

> +	uint32_t expired;

> +	int i, r;

> +	amdgpu_bo_list_handle bo_list;

> +	amdgpu_va_handle va_handle;

> +	struct drm_amdgpu_info_hw_ip info;

> +	uint32_t ring_id;

> +

> +	r = amdgpu_query_hw_ip_info(device_handle,

> AMDGPU_HW_IP_DMA, 0, &info);

> +	CU_ASSERT_EQUAL(r, 0);

> +

> +	r = amdgpu_cs_ctx_create(device_handle, &context_handle);

> +	CU_ASSERT_EQUAL(r, 0);

> +

> +	for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {

> +		r = pthread_create(&stress_thread, NULL,

> write_mem_address, NULL);

> +		CU_ASSERT_EQUAL(r, 0);

> +

> +		r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096,

> 4096,

> +				AMDGPU_GEM_DOMAIN_GTT, 0,

> use_uc_mtype ? AMDGPU_VM_MTYPE_UC : 0,

> +							    &ib_result_handle,

> &ib_result_cpu,

> +

> &ib_result_mc_address, &va_handle);

> +		CU_ASSERT_EQUAL(r, 0);

> +

> +		r = amdgpu_get_bo_list(device_handle, ib_result_handle,

> NULL,

> +				       &bo_list);

> +		CU_ASSERT_EQUAL(r, 0);

> +

> +		ptr = ib_result_cpu;

> +		i = 0;

> +

> +		ptr[i++] =

> SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |

> +				(0 << 26) | /* WAIT_REG_MEM */

> +				(4 << 28) | /* != */

> +				(1 << 31); /* memory */

> +		ptr[i++] = (ib_result_mc_address + 256*4) & 0xfffffffc;

> +		ptr[i++] = ((ib_result_mc_address + 256*4) >> 32) & 0xffffffff;

> +		ptr[i++] = 0x00000000; /* reference value */

> +		ptr[i++] = 0xffffffff; /* and mask */

> +		ptr[i++] =  4 | /* poll interval */

> +				(0xfff << 16); /* retry count */

> +

> +		for (; i < 16; i++)

> +			ptr[i] = 0;

> +

> +		ptr[256] = 0x0; /* the memory we wait on to change */

> +

> +		memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));

> +		ib_info.ib_mc_address = ib_result_mc_address;

> +		ib_info.size = 16;

> +

> +		memset(&ibs_request, 0, sizeof(struct

> amdgpu_cs_request));

> +		ibs_request.ip_type = AMDGPU_HW_IP_DMA;

> +		ibs_request.ring = ring_id;

> +		ibs_request.number_of_ibs = 1;

> +		ibs_request.ibs = &ib_info;

> +		ibs_request.resources = bo_list;

> +		ibs_request.fence_info.handle = NULL;

> +

> +		for (i = 0; i < 200; i++) {

> +			r = amdgpu_cs_submit(context_handle,

> 0,&ibs_request, 1);

> +			CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

> +

> +		}

> +

> +		memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));

> +		fence_status.context = context_handle;

> +		fence_status.ip_type = AMDGPU_HW_IP_DMA;

> +		fence_status.ip_instance = 0;

> +		fence_status.ring = ring_id;

> +		fence_status.fence = ibs_request.seq_no;

> +

> +		r = amdgpu_cs_query_fence_status(&fence_status,

> +				AMDGPU_TIMEOUT_INFINITE,0, &expired);

> +		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

> +

> +		pthread_join(stress_thread, NULL);

> +

> +		r = amdgpu_bo_list_destroy(bo_list);

> +		CU_ASSERT_EQUAL(r, 0);

> +

> +		r = amdgpu_bo_unmap_and_free(ib_result_handle,

> va_handle,

> +					     ib_result_mc_address, 4096);

> +		CU_ASSERT_EQUAL(r, 0);

> +	}

> +	r = amdgpu_cs_ctx_free(context_handle);

> +	CU_ASSERT_EQUAL(r, 0);

> +}

> +

>  static void bad_access_helper(int reg_access)  {

>  	amdgpu_context_handle context_handle;

> --

> 2.7.4

> 

> _______________________________________________

> amd-gfx mailing list

> amd-gfx@lists.freedesktop.org

> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
Am 06.03.19 um 07:37 schrieb Cui, Flora:
> deadlock test for sdma will cause gpu recoverty.
> disable the test for now until GPU reset recovery could survive at least
> 1000 times test.
>
> v2: add modprobe parameter
>
> Change-Id: I9adac63c62db22107345eddb30e7d81a1bda838c
> Signed-off-by: Flora Cui <flora.cui@amd.com>

Acked-by: Christian König <christian.koenig@amd.com>

> ---
>   tests/amdgpu/amdgpu_test.c    |   4 ++
>   tests/amdgpu/deadlock_tests.c | 103 +++++++++++++++++++++++++++++++++++++++++-
>   2 files changed, 106 insertions(+), 1 deletion(-)
>
> diff --git a/tests/amdgpu/amdgpu_test.c b/tests/amdgpu/amdgpu_test.c
> index ebf4409..38b8a68 100644
> --- a/tests/amdgpu/amdgpu_test.c
> +++ b/tests/amdgpu/amdgpu_test.c
> @@ -426,6 +426,10 @@ static void amdgpu_disable_suites()
>   			"compute ring block test (set amdgpu.lockup_timeout=50)", CU_FALSE))
>   		fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
>   
> +	if (amdgpu_set_test_active(DEADLOCK_TESTS_STR,
> +				"sdma ring block test (set amdgpu.lockup_timeout=50)", CU_FALSE))
> +		fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
> +
>   	if (amdgpu_set_test_active(BO_TESTS_STR, "Metadata", CU_FALSE))
>   		fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
>   
> diff --git a/tests/amdgpu/deadlock_tests.c b/tests/amdgpu/deadlock_tests.c
> index a6c2635..91368c1 100644
> --- a/tests/amdgpu/deadlock_tests.c
> +++ b/tests/amdgpu/deadlock_tests.c
> @@ -96,6 +96,9 @@
>   
>   #define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR                                      0x54f
>   
> +#define SDMA_PKT_HEADER_OP(x)	(x & 0xff)
> +#define SDMA_OP_POLL_REGMEM  8
> +
>   static  amdgpu_device_handle device_handle;
>   static  uint32_t  major_version;
>   static  uint32_t  minor_version;
> @@ -110,6 +113,7 @@ static void amdgpu_deadlock_gfx(void);
>   static void amdgpu_deadlock_compute(void);
>   static void amdgpu_illegal_reg_access();
>   static void amdgpu_illegal_mem_access();
> +static void amdgpu_deadlock_sdma(void);
>   
>   CU_BOOL suite_deadlock_tests_enable(void)
>   {
> @@ -171,6 +175,7 @@ int suite_deadlock_tests_clean(void)
>   CU_TestInfo deadlock_tests[] = {
>   	{ "gfx ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_gfx },
>   	{ "compute ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_compute },
> +	{ "sdma ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_sdma },
>   	{ "illegal reg access test", amdgpu_illegal_reg_access },
>   	{ "illegal mem access test (set amdgpu.vm_fault_stop=2)", amdgpu_illegal_mem_access },
>   	CU_TEST_INFO_NULL,
> @@ -260,7 +265,6 @@ static void amdgpu_deadlock_helper(unsigned ip_type)
>   	ibs_request.ibs = &ib_info;
>   	ibs_request.resources = bo_list;
>   	ibs_request.fence_info.handle = NULL;
> -
>   	for (i = 0; i < 200; i++) {
>   		r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);
>   		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
> @@ -291,6 +295,103 @@ static void amdgpu_deadlock_helper(unsigned ip_type)
>   	CU_ASSERT_EQUAL(r, 0);
>   }
>   
> +static void amdgpu_deadlock_sdma(void)
> +{
> +	amdgpu_context_handle context_handle;
> +	amdgpu_bo_handle ib_result_handle;
> +	void *ib_result_cpu;
> +	uint64_t ib_result_mc_address;
> +	struct amdgpu_cs_request ibs_request;
> +	struct amdgpu_cs_ib_info ib_info;
> +	struct amdgpu_cs_fence fence_status;
> +	uint32_t expired;
> +	int i, r;
> +	amdgpu_bo_list_handle bo_list;
> +	amdgpu_va_handle va_handle;
> +	struct drm_amdgpu_info_hw_ip info;
> +	uint32_t ring_id;
> +
> +	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, &info);
> +	CU_ASSERT_EQUAL(r, 0);
> +
> +	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
> +	CU_ASSERT_EQUAL(r, 0);
> +
> +	for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
> +		r = pthread_create(&stress_thread, NULL, write_mem_address, NULL);
> +		CU_ASSERT_EQUAL(r, 0);
> +
> +		r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,
> +				AMDGPU_GEM_DOMAIN_GTT, 0, use_uc_mtype ? AMDGPU_VM_MTYPE_UC : 0,
> +							    &ib_result_handle, &ib_result_cpu,
> +							    &ib_result_mc_address, &va_handle);
> +		CU_ASSERT_EQUAL(r, 0);
> +
> +		r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
> +				       &bo_list);
> +		CU_ASSERT_EQUAL(r, 0);
> +
> +		ptr = ib_result_cpu;
> +		i = 0;
> +
> +		ptr[i++] = SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
> +				(0 << 26) | /* WAIT_REG_MEM */
> +				(4 << 28) | /* != */
> +				(1 << 31); /* memory */
> +		ptr[i++] = (ib_result_mc_address + 256*4) & 0xfffffffc;
> +		ptr[i++] = ((ib_result_mc_address + 256*4) >> 32) & 0xffffffff;
> +		ptr[i++] = 0x00000000; /* reference value */
> +		ptr[i++] = 0xffffffff; /* and mask */
> +		ptr[i++] =  4 | /* poll interval */
> +				(0xfff << 16); /* retry count */
> +
> +		for (; i < 16; i++)
> +			ptr[i] = 0;
> +
> +		ptr[256] = 0x0; /* the memory we wait on to change */
> +
> +		memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
> +		ib_info.ib_mc_address = ib_result_mc_address;
> +		ib_info.size = 16;
> +
> +		memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
> +		ibs_request.ip_type = AMDGPU_HW_IP_DMA;
> +		ibs_request.ring = ring_id;
> +		ibs_request.number_of_ibs = 1;
> +		ibs_request.ibs = &ib_info;
> +		ibs_request.resources = bo_list;
> +		ibs_request.fence_info.handle = NULL;
> +
> +		for (i = 0; i < 200; i++) {
> +			r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);
> +			CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
> +
> +		}
> +
> +		memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
> +		fence_status.context = context_handle;
> +		fence_status.ip_type = AMDGPU_HW_IP_DMA;
> +		fence_status.ip_instance = 0;
> +		fence_status.ring = ring_id;
> +		fence_status.fence = ibs_request.seq_no;
> +
> +		r = amdgpu_cs_query_fence_status(&fence_status,
> +				AMDGPU_TIMEOUT_INFINITE,0, &expired);
> +		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
> +
> +		pthread_join(stress_thread, NULL);
> +
> +		r = amdgpu_bo_list_destroy(bo_list);
> +		CU_ASSERT_EQUAL(r, 0);
> +
> +		r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
> +					     ib_result_mc_address, 4096);
> +		CU_ASSERT_EQUAL(r, 0);
> +	}
> +	r = amdgpu_cs_ctx_free(context_handle);
> +	CU_ASSERT_EQUAL(r, 0);
> +}
> +
>   static void bad_access_helper(int reg_access)
>   {
>   	amdgpu_context_handle context_handle;
On 3/6/19 1:37 AM, Cui, Flora wrote:

> deadlock test for sdma will cause gpu recoverty.

> disable the test for now until GPU reset recovery could survive at least

> 1000 times test.


Can you specify what issues you see and on what ASIC ?

Andrey

>

> v2: add modprobe parameter

>

> Change-Id: I9adac63c62db22107345eddb30e7d81a1bda838c

> Signed-off-by: Flora Cui <flora.cui@amd.com>

> ---

>   tests/amdgpu/amdgpu_test.c    |   4 ++

>   tests/amdgpu/deadlock_tests.c | 103 +++++++++++++++++++++++++++++++++++++++++-

>   2 files changed, 106 insertions(+), 1 deletion(-)

>

> diff --git a/tests/amdgpu/amdgpu_test.c b/tests/amdgpu/amdgpu_test.c

> index ebf4409..38b8a68 100644

> --- a/tests/amdgpu/amdgpu_test.c

> +++ b/tests/amdgpu/amdgpu_test.c

> @@ -426,6 +426,10 @@ static void amdgpu_disable_suites()

>   			"compute ring block test (set amdgpu.lockup_timeout=50)", CU_FALSE))

>   		fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());

>   

> +	if (amdgpu_set_test_active(DEADLOCK_TESTS_STR,

> +				"sdma ring block test (set amdgpu.lockup_timeout=50)", CU_FALSE))

> +		fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());

> +

>   	if (amdgpu_set_test_active(BO_TESTS_STR, "Metadata", CU_FALSE))

>   		fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());

>   

> diff --git a/tests/amdgpu/deadlock_tests.c b/tests/amdgpu/deadlock_tests.c

> index a6c2635..91368c1 100644

> --- a/tests/amdgpu/deadlock_tests.c

> +++ b/tests/amdgpu/deadlock_tests.c

> @@ -96,6 +96,9 @@

>   

>   #define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR                                      0x54f

>   

> +#define SDMA_PKT_HEADER_OP(x)	(x & 0xff)

> +#define SDMA_OP_POLL_REGMEM  8

> +

>   static  amdgpu_device_handle device_handle;

>   static  uint32_t  major_version;

>   static  uint32_t  minor_version;

> @@ -110,6 +113,7 @@ static void amdgpu_deadlock_gfx(void);

>   static void amdgpu_deadlock_compute(void);

>   static void amdgpu_illegal_reg_access();

>   static void amdgpu_illegal_mem_access();

> +static void amdgpu_deadlock_sdma(void);

>   

>   CU_BOOL suite_deadlock_tests_enable(void)

>   {

> @@ -171,6 +175,7 @@ int suite_deadlock_tests_clean(void)

>   CU_TestInfo deadlock_tests[] = {

>   	{ "gfx ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_gfx },

>   	{ "compute ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_compute },

> +	{ "sdma ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_sdma },

>   	{ "illegal reg access test", amdgpu_illegal_reg_access },

>   	{ "illegal mem access test (set amdgpu.vm_fault_stop=2)", amdgpu_illegal_mem_access },

>   	CU_TEST_INFO_NULL,

> @@ -260,7 +265,6 @@ static void amdgpu_deadlock_helper(unsigned ip_type)

>   	ibs_request.ibs = &ib_info;

>   	ibs_request.resources = bo_list;

>   	ibs_request.fence_info.handle = NULL;

> -

>   	for (i = 0; i < 200; i++) {

>   		r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);

>   		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

> @@ -291,6 +295,103 @@ static void amdgpu_deadlock_helper(unsigned ip_type)

>   	CU_ASSERT_EQUAL(r, 0);

>   }

>   

> +static void amdgpu_deadlock_sdma(void)

> +{

> +	amdgpu_context_handle context_handle;

> +	amdgpu_bo_handle ib_result_handle;

> +	void *ib_result_cpu;

> +	uint64_t ib_result_mc_address;

> +	struct amdgpu_cs_request ibs_request;

> +	struct amdgpu_cs_ib_info ib_info;

> +	struct amdgpu_cs_fence fence_status;

> +	uint32_t expired;

> +	int i, r;

> +	amdgpu_bo_list_handle bo_list;

> +	amdgpu_va_handle va_handle;

> +	struct drm_amdgpu_info_hw_ip info;

> +	uint32_t ring_id;

> +

> +	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, &info);

> +	CU_ASSERT_EQUAL(r, 0);

> +

> +	r = amdgpu_cs_ctx_create(device_handle, &context_handle);

> +	CU_ASSERT_EQUAL(r, 0);

> +

> +	for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {

> +		r = pthread_create(&stress_thread, NULL, write_mem_address, NULL);

> +		CU_ASSERT_EQUAL(r, 0);

> +

> +		r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,

> +				AMDGPU_GEM_DOMAIN_GTT, 0, use_uc_mtype ? AMDGPU_VM_MTYPE_UC : 0,

> +							    &ib_result_handle, &ib_result_cpu,

> +							    &ib_result_mc_address, &va_handle);

> +		CU_ASSERT_EQUAL(r, 0);

> +

> +		r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,

> +				       &bo_list);

> +		CU_ASSERT_EQUAL(r, 0);

> +

> +		ptr = ib_result_cpu;

> +		i = 0;

> +

> +		ptr[i++] = SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |

> +				(0 << 26) | /* WAIT_REG_MEM */

> +				(4 << 28) | /* != */

> +				(1 << 31); /* memory */

> +		ptr[i++] = (ib_result_mc_address + 256*4) & 0xfffffffc;

> +		ptr[i++] = ((ib_result_mc_address + 256*4) >> 32) & 0xffffffff;

> +		ptr[i++] = 0x00000000; /* reference value */

> +		ptr[i++] = 0xffffffff; /* and mask */

> +		ptr[i++] =  4 | /* poll interval */

> +				(0xfff << 16); /* retry count */

> +

> +		for (; i < 16; i++)

> +			ptr[i] = 0;

> +

> +		ptr[256] = 0x0; /* the memory we wait on to change */

> +

> +		memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));

> +		ib_info.ib_mc_address = ib_result_mc_address;

> +		ib_info.size = 16;

> +

> +		memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));

> +		ibs_request.ip_type = AMDGPU_HW_IP_DMA;

> +		ibs_request.ring = ring_id;

> +		ibs_request.number_of_ibs = 1;

> +		ibs_request.ibs = &ib_info;

> +		ibs_request.resources = bo_list;

> +		ibs_request.fence_info.handle = NULL;

> +

> +		for (i = 0; i < 200; i++) {

> +			r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);

> +			CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

> +

> +		}

> +

> +		memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));

> +		fence_status.context = context_handle;

> +		fence_status.ip_type = AMDGPU_HW_IP_DMA;

> +		fence_status.ip_instance = 0;

> +		fence_status.ring = ring_id;

> +		fence_status.fence = ibs_request.seq_no;

> +

> +		r = amdgpu_cs_query_fence_status(&fence_status,

> +				AMDGPU_TIMEOUT_INFINITE,0, &expired);

> +		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

> +

> +		pthread_join(stress_thread, NULL);

> +

> +		r = amdgpu_bo_list_destroy(bo_list);

> +		CU_ASSERT_EQUAL(r, 0);

> +

> +		r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,

> +					     ib_result_mc_address, 4096);

> +		CU_ASSERT_EQUAL(r, 0);

> +	}

> +	r = amdgpu_cs_ctx_free(context_handle);

> +	CU_ASSERT_EQUAL(r, 0);

> +}

> +

>   static void bad_access_helper(int reg_access)

>   {

>   	amdgpu_context_handle context_handle;
Hi Alex,

could you help to submit this patch upstream?

在 3/6/2019 4:08 PM, Christian König 写道:
> Am 06.03.19 um 07:37 schrieb Cui, Flora:

>> deadlock test for sdma will cause gpu recoverty.

>> disable the test for now until GPU reset recovery could survive at least

>> 1000 times test.

>>

>> v2: add modprobe parameter

>>

>> Change-Id: I9adac63c62db22107345eddb30e7d81a1bda838c

>> Signed-off-by: Flora Cui <flora.cui@amd.com>

>

> Acked-by: Christian König <christian.koenig@amd.com>

>

>> ---

>>   tests/amdgpu/amdgpu_test.c    |   4 ++

>>   tests/amdgpu/deadlock_tests.c | 103 

>> +++++++++++++++++++++++++++++++++++++++++-

>>   2 files changed, 106 insertions(+), 1 deletion(-)

>>

>> diff --git a/tests/amdgpu/amdgpu_test.c b/tests/amdgpu/amdgpu_test.c

>> index ebf4409..38b8a68 100644

>> --- a/tests/amdgpu/amdgpu_test.c

>> +++ b/tests/amdgpu/amdgpu_test.c

>> @@ -426,6 +426,10 @@ static void amdgpu_disable_suites()

>>               "compute ring block test (set 

>> amdgpu.lockup_timeout=50)", CU_FALSE))

>>           fprintf(stderr, "test deactivation failed - %s\n", 

>> CU_get_error_msg());

>>   +    if (amdgpu_set_test_active(DEADLOCK_TESTS_STR,

>> +                "sdma ring block test (set 

>> amdgpu.lockup_timeout=50)", CU_FALSE))

>> +        fprintf(stderr, "test deactivation failed - %s\n", 

>> CU_get_error_msg());

>> +

>>       if (amdgpu_set_test_active(BO_TESTS_STR, "Metadata", CU_FALSE))

>>           fprintf(stderr, "test deactivation failed - %s\n", 

>> CU_get_error_msg());

>>   diff --git a/tests/amdgpu/deadlock_tests.c 

>> b/tests/amdgpu/deadlock_tests.c

>> index a6c2635..91368c1 100644

>> --- a/tests/amdgpu/deadlock_tests.c

>> +++ b/tests/amdgpu/deadlock_tests.c

>> @@ -96,6 +96,9 @@

>>     #define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x54f

>>   +#define SDMA_PKT_HEADER_OP(x)    (x & 0xff)

>> +#define SDMA_OP_POLL_REGMEM  8

>> +

>>   static  amdgpu_device_handle device_handle;

>>   static  uint32_t  major_version;

>>   static  uint32_t  minor_version;

>> @@ -110,6 +113,7 @@ static void amdgpu_deadlock_gfx(void);

>>   static void amdgpu_deadlock_compute(void);

>>   static void amdgpu_illegal_reg_access();

>>   static void amdgpu_illegal_mem_access();

>> +static void amdgpu_deadlock_sdma(void);

>>     CU_BOOL suite_deadlock_tests_enable(void)

>>   {

>> @@ -171,6 +175,7 @@ int suite_deadlock_tests_clean(void)

>>   CU_TestInfo deadlock_tests[] = {

>>       { "gfx ring block test (set amdgpu.lockup_timeout=50)", 

>> amdgpu_deadlock_gfx },

>>       { "compute ring block test (set amdgpu.lockup_timeout=50)", 

>> amdgpu_deadlock_compute },

>> +    { "sdma ring block test (set amdgpu.lockup_timeout=50)", 

>> amdgpu_deadlock_sdma },

>>       { "illegal reg access test", amdgpu_illegal_reg_access },

>>       { "illegal mem access test (set amdgpu.vm_fault_stop=2)", 

>> amdgpu_illegal_mem_access },

>>       CU_TEST_INFO_NULL,

>> @@ -260,7 +265,6 @@ static void amdgpu_deadlock_helper(unsigned ip_type)

>>       ibs_request.ibs = &ib_info;

>>       ibs_request.resources = bo_list;

>>       ibs_request.fence_info.handle = NULL;

>> -

>>       for (i = 0; i < 200; i++) {

>>           r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);

>>           CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

>> @@ -291,6 +295,103 @@ static void amdgpu_deadlock_helper(unsigned 

>> ip_type)

>>       CU_ASSERT_EQUAL(r, 0);

>>   }

>>   +static void amdgpu_deadlock_sdma(void)

>> +{

>> +    amdgpu_context_handle context_handle;

>> +    amdgpu_bo_handle ib_result_handle;

>> +    void *ib_result_cpu;

>> +    uint64_t ib_result_mc_address;

>> +    struct amdgpu_cs_request ibs_request;

>> +    struct amdgpu_cs_ib_info ib_info;

>> +    struct amdgpu_cs_fence fence_status;

>> +    uint32_t expired;

>> +    int i, r;

>> +    amdgpu_bo_list_handle bo_list;

>> +    amdgpu_va_handle va_handle;

>> +    struct drm_amdgpu_info_hw_ip info;

>> +    uint32_t ring_id;

>> +

>> +    r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, 

>> &info);

>> +    CU_ASSERT_EQUAL(r, 0);

>> +

>> +    r = amdgpu_cs_ctx_create(device_handle, &context_handle);

>> +    CU_ASSERT_EQUAL(r, 0);

>> +

>> +    for (ring_id = 0; (1 << ring_id) & info.available_rings; 

>> ring_id++) {

>> +        r = pthread_create(&stress_thread, NULL, write_mem_address, 

>> NULL);

>> +        CU_ASSERT_EQUAL(r, 0);

>> +

>> +        r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,

>> +                AMDGPU_GEM_DOMAIN_GTT, 0, use_uc_mtype ? 

>> AMDGPU_VM_MTYPE_UC : 0,

>> +                                &ib_result_handle, &ib_result_cpu,

>> +                                &ib_result_mc_address, &va_handle);

>> +        CU_ASSERT_EQUAL(r, 0);

>> +

>> +        r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,

>> +                       &bo_list);

>> +        CU_ASSERT_EQUAL(r, 0);

>> +

>> +        ptr = ib_result_cpu;

>> +        i = 0;

>> +

>> +        ptr[i++] = SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |

>> +                (0 << 26) | /* WAIT_REG_MEM */

>> +                (4 << 28) | /* != */

>> +                (1 << 31); /* memory */

>> +        ptr[i++] = (ib_result_mc_address + 256*4) & 0xfffffffc;

>> +        ptr[i++] = ((ib_result_mc_address + 256*4) >> 32) & 0xffffffff;

>> +        ptr[i++] = 0x00000000; /* reference value */

>> +        ptr[i++] = 0xffffffff; /* and mask */

>> +        ptr[i++] =  4 | /* poll interval */

>> +                (0xfff << 16); /* retry count */

>> +

>> +        for (; i < 16; i++)

>> +            ptr[i] = 0;

>> +

>> +        ptr[256] = 0x0; /* the memory we wait on to change */

>> +

>> +        memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));

>> +        ib_info.ib_mc_address = ib_result_mc_address;

>> +        ib_info.size = 16;

>> +

>> +        memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));

>> +        ibs_request.ip_type = AMDGPU_HW_IP_DMA;

>> +        ibs_request.ring = ring_id;

>> +        ibs_request.number_of_ibs = 1;

>> +        ibs_request.ibs = &ib_info;

>> +        ibs_request.resources = bo_list;

>> +        ibs_request.fence_info.handle = NULL;

>> +

>> +        for (i = 0; i < 200; i++) {

>> +            r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);

>> +            CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

>> +

>> +        }

>> +

>> +        memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));

>> +        fence_status.context = context_handle;

>> +        fence_status.ip_type = AMDGPU_HW_IP_DMA;

>> +        fence_status.ip_instance = 0;

>> +        fence_status.ring = ring_id;

>> +        fence_status.fence = ibs_request.seq_no;

>> +

>> +        r = amdgpu_cs_query_fence_status(&fence_status,

>> +                AMDGPU_TIMEOUT_INFINITE,0, &expired);

>> +        CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

>> +

>> +        pthread_join(stress_thread, NULL);

>> +

>> +        r = amdgpu_bo_list_destroy(bo_list);

>> +        CU_ASSERT_EQUAL(r, 0);

>> +

>> +        r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,

>> +                         ib_result_mc_address, 4096);

>> +        CU_ASSERT_EQUAL(r, 0);

>> +    }

>> +    r = amdgpu_cs_ctx_free(context_handle);

>> +    CU_ASSERT_EQUAL(r, 0);

>> +}

>> +

>>   static void bad_access_helper(int reg_access)

>>   {

>>       amdgpu_context_handle context_handle;

>