[2/2] drm/ttm: fix ttm_bo_cleanup_refs_or_queue once more

Submitted by Christian König on Sept. 4, 2017, 7:02 p.m.

Details

Message ID: 1504551766-5093-2-git-send-email-deathsimple@vodafone.de
State: Accepted
Commit: 378e2d5b504fe0231c557751e58b80fcf717cc20
Series: "Series without cover letter", rev 2, in AMD X.Org drivers

Commit Message

Christian König, Sept. 4, 2017, 7:02 p.m.
From: Christian König <christian.koenig@amd.com>

With shared reservation objects, __ttm_bo_reserve() can easily fail even on
destroyed BOs. This prevents correct handling when we need to individualize
the reservation object.

Fix this by individualizing the object before even trying to reserve it.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 180ce62..bee77d3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -440,28 +440,29 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
+	ret = ttm_bo_individualize_resv(bo);
+	if (ret) {
+		/* Last resort, if we fail to allocate memory for the
+		 * fences block for the BO to become idle
+		 */
+		reservation_object_wait_timeout_rcu(bo->resv, true, false,
+						    30 * HZ);
+		spin_lock(&glob->lru_lock);
+		goto error;
+	}
+
 	spin_lock(&glob->lru_lock);
 	ret = __ttm_bo_reserve(bo, false, true, NULL);
-
 	if (!ret) {
-		if (!ttm_bo_wait(bo, false, true)) {
+		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
 			ttm_bo_del_from_lru(bo);
 			spin_unlock(&glob->lru_lock);
+			if (bo->resv != &bo->ttm_resv)
+				reservation_object_unlock(&bo->ttm_resv);
 			ttm_bo_cleanup_memtype_use(bo);
-
 			return;
 		}
 
-		ret = ttm_bo_individualize_resv(bo);
-		if (ret) {
-			/* Last resort, if we fail to allocate memory for the
-			 * fences block for the BO to become idle and free it.
-			 */
-			spin_unlock(&glob->lru_lock);
-			ttm_bo_wait(bo, true, true);
-			ttm_bo_cleanup_memtype_use(bo);
-			return;
-		}
 		ttm_bo_flush_all_fences(bo);
 
 		/*
@@ -474,11 +475,12 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 			ttm_bo_add_to_lru(bo);
 		}
 
-		if (bo->resv != &bo->ttm_resv)
-			reservation_object_unlock(&bo->ttm_resv);
 		__ttm_bo_unreserve(bo);
 	}
+	if (bo->resv != &bo->ttm_resv)
+		reservation_object_unlock(&bo->ttm_resv);
 
+error:
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
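
A note on why the reserve can fail in the first place: TTM lets a driver pass
an existing reservation object in at BO creation time, so several BOs can end
up sharing one lock (amdgpu uses this for per-VM BOs). A minimal sketch of
that init-time choice, condensed from ttm_bo_init() and not verbatim:

	/* Condensed sketch of the init-time choice in ttm_bo_init();
	 * approximate reconstruction, not the upstream code.
	 */
	if (resv) {
		bo->resv = resv;		/* shared with other BOs */
	} else {
		bo->resv = &bo->ttm_resv;	/* private to this BO */
		reservation_object_init(&bo->ttm_resv);
	}

Because a shared object can be held at any moment by work on sibling BOs, the
trylock in __ttm_bo_reserve() may fail even though this BO is already
destroyed; that is exactly the window the patch closes by individualizing
before trying to reserve.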

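For reference, the ttm_bo_individualize_resv() helper the patch now calls up
front gives the BO a private reservation object preloaded with copies of the
shared fences. A rough reconstruction of the helper as it looked around this
series (details may differ per tree):

	static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
	{
		int r;

		/* Already using the private object, nothing to do. */
		if (bo->resv == &bo->ttm_resv)
			return 0;

		reservation_object_init(&bo->ttm_resv);
		/* Nobody else can see ttm_resv yet, trylock cannot fail. */
		BUG_ON(!reservation_object_trylock(&bo->ttm_resv));

		/* Snapshot the shared fences so the destroy path can wait
		 * on them without ever contending for the shared lock.
		 */
		r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
		if (r)
			reservation_object_unlock(&bo->ttm_resv);

		return r;
	}

This is also why the early-free branch and the tail of the function check
bo->resv != &bo->ttm_resv: on success the BO holds a locked private object
that must be unlocked on every exit path.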
Comments

Acked-by: Chunming Zhou <david1.zhou@amd.com>


On Sept. 5, 2017, 03:02, Christian König wrote:
> From: Christian König <christian.koenig@amd.com>
>
> With shared reservation objects, __ttm_bo_reserve() can easily fail even on
> destroyed BOs. This prevents correct handling when we need to individualize
> the reservation object.
>
> Fix this by individualizing the object before even trying to reserve it.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>