[8/8] drm/amdgpu: rework page directory filling

Submitted by Christian König on Aug. 10, 2017, 2:50 p.m.

Details

Message ID 1502376647-4519-8-git-send-email-deathsimple@vodafone.de
State New
Series "Series without cover letter" ( rev: 1 ) in AMD X.Org drivers


Commit Message

Christian König Aug. 10, 2017, 2:50 p.m.
From: Christian König <christian.koenig@amd.com>

Keep track of relocated PDs/PTs instead of walking and checking all PDs.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 85 ++++++++++++++++++++++------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  3 ++
 2 files changed, 59 insertions(+), 29 deletions(-)


diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8e2ce91..8056e4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -204,7 +204,10 @@  int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		}
 
 		spin_lock(&vm->status_lock);
-		list_del_init(&bo_base->vm_status);
+		if (bo->parent)
+			list_move(&bo_base->vm_status, &vm->relocated);
+		else
+			list_del_init(&bo_base->vm_status);
 	}
 	spin_unlock(&vm->status_lock);
 
@@ -303,8 +306,10 @@  static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 			entry->base.vm = vm;
 			entry->base.bo = pt;
 			list_add_tail(&entry->base.bo_list, &pt->va);
-			INIT_LIST_HEAD(&entry->base.vm_status);
-			entry->addr = 0;
+			spin_lock(&vm->status_lock);
+			list_add(&entry->base.vm_status, &vm->relocated);
+			spin_unlock(&vm->status_lock);
+			entry->addr = ~0ULL;
 		}
 
 		if (level < adev->vm_manager.num_level) {
@@ -989,18 +994,17 @@  static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  */
 static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 				  struct amdgpu_vm *vm,
-				  struct amdgpu_vm_pt *parent,
-				  unsigned level)
+				  struct amdgpu_vm_pt *parent)
 {
 	struct amdgpu_bo *shadow;
 	struct amdgpu_ring *ring = NULL;
 	uint64_t pd_addr, shadow_addr = 0;
-	uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
 	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
 	unsigned count = 0, pt_idx, ndw = 0;
 	struct amdgpu_job *job;
 	struct amdgpu_pte_update_params params;
 	struct dma_fence *fence = NULL;
+	uint32_t incr;
 
 	int r;
 
@@ -1048,12 +1052,17 @@  static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 
 	/* walk over the address space and update the directory */
 	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
-		struct amdgpu_bo *bo = parent->entries[pt_idx].base.bo;
+		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
+		struct amdgpu_bo *bo = entry->base.bo;
 		uint64_t pde, pt;
 
 		if (bo == NULL)
 			continue;
 
+		spin_lock(&vm->status_lock);
+		list_del_init(&entry->base.vm_status);
+		spin_unlock(&vm->status_lock);
+
 		pt = amdgpu_bo_gpu_offset(bo);
 		pt = amdgpu_gart_get_vm_pde(adev, pt);
 		if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) ||
@@ -1063,6 +1072,7 @@  static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 		parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
 
 		pde = pd_addr + pt_idx * 8;
+		incr = amdgpu_bo_size(bo);
 		if (((last_pde + 8 * count) != pde) ||
 		    ((last_pt + incr * count) != pt) ||
 		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
@@ -1123,20 +1133,6 @@  static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 			dma_fence_put(fence);
 		}
 	}
-	/*
-	 * Recurse into the subdirectories. This recursion is harmless because
-	 * we only have a maximum of 5 layers.
-	 */
-	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
-		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
-
-		if (!entry->base.bo)
-			continue;
-
-		r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
-		if (r)
-			return r;
-	}
 
 	return 0;
 
@@ -1152,7 +1148,8 @@  static int amdgpu_vm_update_level(struct amdgpu_device *adev,
  *
  * Mark all PD level as invalid after an error.
  */
-static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
+				       struct amdgpu_vm_pt *parent)
 {
 	unsigned pt_idx;
 
@@ -1167,7 +1164,10 @@  static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
 			continue;
 
 		entry->addr = ~0ULL;
-		amdgpu_vm_invalidate_level(entry);
+		spin_lock(&vm->status_lock);
+		list_move(&entry->base.vm_status, &vm->relocated);
+		spin_unlock(&vm->status_lock);
+		amdgpu_vm_invalidate_level(vm, entry);
 	}
 }
 
@@ -1185,9 +1185,31 @@  int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 {
 	int r;
 
-	r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
-	if (r)
-		amdgpu_vm_invalidate_level(&vm->root);
+	spin_lock(&vm->status_lock);
+	while (!list_empty(&vm->relocated)) {
+		struct amdgpu_vm_bo_base *bo_base, *parent;
+		struct amdgpu_vm_pt *pt;
+		struct amdgpu_bo *bo;
+
+		bo_base = list_first_entry(&vm->relocated,
+					   struct amdgpu_vm_bo_base,
+					   vm_status);
+		spin_unlock(&vm->status_lock);
+
+		bo = bo_base->bo->parent;
+		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
+					  bo_list);
+		pt = container_of(parent, struct amdgpu_vm_pt, base);
+
+		r = amdgpu_vm_update_level(adev, vm, pt);
+		if (r) {
+			amdgpu_vm_invalidate_level(vm, &vm->root);
+			break;
+		}
+
+		spin_lock(&vm->status_lock);
+	}
+	spin_unlock(&vm->status_lock);
 
 	if (vm->use_cpu_for_update) {
 		/* Flush HDP */
@@ -1575,7 +1597,7 @@  static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 error_free:
 	amdgpu_job_free(job);
-	amdgpu_vm_invalidate_level(&vm->root);
+	amdgpu_vm_invalidate_level(vm, &vm->root);
 	return r;
 }
 
@@ -2374,9 +2396,13 @@  void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			continue;
 		}
 
-		/* Don't add page tables to the moved state */
-		if (bo->tbo.type == ttm_bo_type_kernel)
+		if (bo->tbo.type == ttm_bo_type_kernel) {
+			spin_lock(&bo_base->vm->status_lock);
+			if (list_empty(&bo_base->vm_status))
+				list_add(&bo_base->vm_status, &vm->relocated);
+			spin_unlock(&bo_base->vm->status_lock);
 			continue;
+		}
 
 		spin_lock(&bo_base->vm->status_lock);
 		if (list_empty(&bo_base->vm_status))
@@ -2451,6 +2477,7 @@  int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		vm->reserved_vmid[i] = NULL;
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->evicted);
+	INIT_LIST_HEAD(&vm->relocated);
 	INIT_LIST_HEAD(&vm->moved);
 	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index a649950..bc1c995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -131,6 +131,9 @@  struct amdgpu_vm {
 	/* BOs who needs a validation */
 	struct list_head	evicted;
 
+	/* PT BOs which relocated and their parent need an update */
+	struct list_head	relocated;
+
 	/* BOs moved, but not yet updated in the PT */
 	struct list_head	moved;
 

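The heart of the rework is the new vm->relocated list: instead of recursing through every PD on each submission, page tables are put on the list when they move (on validation, allocation, invalidation), and amdgpu_vm_update_directories() drains the list, updating only parents that actually have dirty children. Below is a minimal, self-contained userspace C model of that drain pattern. It is an illustration only, not driver code: a pthread mutex stands in for status_lock, update_level() is a stub, and, unlike the real patch where amdgpu_vm_update_level() unlinks all of a parent's entries, the model simply pops one entry at a time.

/*
 * Minimal userspace model of the "drain the relocated list" pattern
 * introduced by this patch in amdgpu_vm_update_directories().
 * Illustration only: pthread mutex instead of a spinlock, stubbed
 * update, simplified bookkeeping.
 */
#include <stdio.h>
#include <stddef.h>
#include <pthread.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *e, struct list_head *h)
{
	e->next = h->next; e->prev = h;
	h->next->prev = e; h->next = e;
}

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	list_init(e);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vm_pt {
	struct list_head vm_status;	/* link into vm->relocated */
	struct vm_pt *parent;		/* NULL for the root PD */
	const char *name;
};

struct vm {
	pthread_mutex_t status_lock;	/* stands in for the spinlock */
	struct list_head relocated;	/* PTs whose parent needs an update */
};

/* Stub for amdgpu_vm_update_level(): rewrite the parent's PDEs. */
static int update_level(struct vm_pt *parent)
{
	printf("updating PDEs of %s\n", parent->name);
	return 0;
}

/*
 * The drain pattern: pop one entry under the lock, drop the lock for
 * the (potentially sleeping) update, then retake it and re-check the
 * list head.  The real patch instead lets amdgpu_vm_update_level()
 * unlink every one of the parent's entries.
 */
static int update_directories(struct vm *vm)
{
	int r = 0;

	pthread_mutex_lock(&vm->status_lock);
	while (!list_empty(&vm->relocated)) {
		struct vm_pt *entry = container_of(vm->relocated.next,
						   struct vm_pt, vm_status);

		list_del_init(&entry->vm_status);
		pthread_mutex_unlock(&vm->status_lock);

		if (entry->parent)	/* the root PD has no parent */
			r = update_level(entry->parent);
		if (r)
			return r;	/* caller would invalidate the levels */

		pthread_mutex_lock(&vm->status_lock);
	}
	pthread_mutex_unlock(&vm->status_lock);
	return 0;
}

int main(void)
{
	struct vm vm = { .status_lock = PTHREAD_MUTEX_INITIALIZER };
	struct vm_pt root = { .parent = NULL, .name = "root" };
	struct vm_pt pt0  = { .parent = &root, .name = "pt0" };

	list_init(&vm.relocated);
	list_init(&root.vm_status);
	list_init(&pt0.vm_status);

	list_add(&pt0.vm_status, &vm.relocated);	/* pt0 was relocated */
	return update_directories(&vm);
}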
Comments

> -----Original Message-----
> From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On Behalf
> Of Christian König
> Sent: Thursday, August 10, 2017 10:51 AM
> To: amd-gfx@lists.freedesktop.org
> Subject: [PATCH 8/8] drm/amdgpu: rework page directory filling
>
> From: Christian König <christian.koenig@amd.com>
>
> Keep track of relocated PDs/PTs instead of walking and checking all PDs.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>


Some comments on patch 4.  The rest are:
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>


On 2017-08-10 10:50 AM, Christian König wrote:
> @@ -1185,9 +1185,31 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
>  {
>  	int r;
>  
> -	r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
> -	if (r)
> -		amdgpu_vm_invalidate_level(&vm->root);
> +	spin_lock(&vm->status_lock);
> +	while (!list_empty(&vm->relocated)) {
> +		struct amdgpu_vm_bo_base *bo_base, *parent;
> +		struct amdgpu_vm_pt *pt;
> +		struct amdgpu_bo *bo;
> +
> +		bo_base = list_first_entry(&vm->relocated,
> +					   struct amdgpu_vm_bo_base,
> +					   vm_status);
> +		spin_unlock(&vm->status_lock);
> +
> +		bo = bo_base->bo->parent;

For the root page directory I think bo->parent will be NULL. Can
the root page directory be relocated? If yes, this needs to be handled
as a special case here.

Regards,
  Felix

> +		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
> +					  bo_list);
> +		pt = container_of(parent, struct amdgpu_vm_pt, base);
> +
> +		r = amdgpu_vm_update_level(adev, vm, pt);
> +		if (r) {
> +			amdgpu_vm_invalidate_level(vm, &vm->root);
> +			break;
> +		}
> +
> +		spin_lock(&vm->status_lock);
> +	}
> +	spin_unlock(&vm->status_lock);
>  
>  	if (vm->use_cpu_for_update) {
>  		/* Flush HDP */
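For reference, a minimal sketch of the special case Felix raises might look like the following, inserted where the quoted loop dereferences bo. This is purely illustrative and untested, not part of the patch; whether the root PD can actually land on vm->relocated is exactly the open question above:

		bo = bo_base->bo->parent;
		if (!bo) {
			/* Hypothetical root-PD handling: the root has no
			 * parent whose PDEs point at it, so there is nothing
			 * to rewrite above it.  Drop it from the relocated
			 * list and re-check the list with the lock held, as
			 * the loop condition expects.
			 */
			spin_lock(&vm->status_lock);
			list_del_init(&bo_base->vm_status);
			continue;
		}
		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
					  bo_list);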