[2/5] drm/amdgpu: fix a bunch of coding style issues in amdgpu_ttm.c

Submitted by Christian König on Sept. 15, 2016, 1:10 p.m.

Details

Message ID: 1473945024-8275-2-git-send-email-deathsimple@vodafone.de
State: New
Series "Series without cover letter" ( rev: 1 ) in AMD X.Org drivers

Commit Message

Christian König Sept. 15, 2016, 1:10 p.m.
From: Christian König <christian.koenig@amd.com>

No intented functional change.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 65 +++++++++++++++++++++------------
 1 file changed, 42 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 428ffb6..1965209 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -214,9 +214,11 @@  static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
 		if (rbo->adev->mman.buffer_funcs_ring->ready == false) {
-			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
+			amdgpu_ttm_placement_from_domain(rbo,
+							 AMDGPU_GEM_DOMAIN_CPU);
 		} else {
-			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
+			amdgpu_ttm_placement_from_domain(rbo,
+							 AMDGPU_GEM_DOMAIN_GTT);
 			for (i = 0; i < rbo->placement.num_placement; ++i) {
 				if (!(rbo->placements[i].flags &
 				      TTM_PL_FLAG_TT))
@@ -524,7 +526,8 @@  memcpy:
 	return 0;
 }
 
-static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				     struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	struct amdgpu_device *adev = amdgpu_get_adev(bdev);
@@ -545,7 +548,8 @@  static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
 		/* check if it's visible */
-		if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
+		if ((mem->bus.offset + mem->bus.size) >
+		    adev->mc.visible_vram_size)
 			return -EINVAL;
 		mem->bus.base = adev->mc.aper_base;
 		mem->bus.is_iomem = true;
@@ -579,7 +583,8 @@  static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
 	return 0;
 }
 
-static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
+				   struct ttm_mem_reg *mem)
 {
 }
 
@@ -791,7 +796,8 @@  int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
 	bo_mem.mem_type = TTM_PL_TT;
 	spin_lock(&adev->gtt_list_lock);
 	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
-		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
+		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm,
+						&bo_mem);
 		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
 				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
 				     flags);
@@ -856,7 +862,8 @@  static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
 	}
 	gtt->ttm.ttm.func = &amdgpu_backend_func;
 	gtt->adev = adev;
-	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
+			    dummy_read_page)) {
 		kfree(gtt);
 		return NULL;
 	}
@@ -887,7 +894,8 @@  static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 
 	if (slave && ttm->sg) {
 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
-						 gtt->ttm.dma_address, ttm->num_pages);
+						 gtt->ttm.dma_address,
+						 ttm->num_pages);
 		ttm->state = tt_unbound;
 		return 0;
 	}
@@ -906,13 +914,17 @@  static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 	}
 
 	for (i = 0; i < ttm->num_pages; i++) {
-		gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
+		gtt->ttm.dma_address[i] = pci_map_page(adev->pdev,
+						       ttm->pages[i],
 						       0, PAGE_SIZE,
 						       PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
+		if (pci_dma_mapping_error(adev->pdev,
+					  gtt->ttm.dma_address[i])) {
 			while (i--) {
-				pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
-					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+				pci_unmap_page(adev->pdev,
+					       gtt->ttm.dma_address[i],
+					       PAGE_SIZE,
+					       PCI_DMA_BIDIRECTIONAL);
 				gtt->ttm.dma_address[i] = 0;
 			}
 			ttm_pool_unpopulate(ttm);
@@ -1195,14 +1207,20 @@  int amdgpu_ttm_init(struct amdgpu_device *adev)
 		 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
 
 	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
-	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
-	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
+	adev->gds.mem.gfx_partition_size =
+		adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
+	adev->gds.mem.cs_partition_size =
+		adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
 	adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
-	adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
-	adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
+	adev->gds.gws.gfx_partition_size =
+		adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
+	adev->gds.gws.cs_partition_size =
+		adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
 	adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
-	adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
-	adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
+	adev->gds.oa.gfx_partition_size =
+		adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
+	adev->gds.oa.cs_partition_size =
+		adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
 	/* GDS Memory */
 	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
 				adev->gds.mem.total_size >> PAGE_SHIFT);
@@ -1362,10 +1380,9 @@  error_free:
 	return r;
 }
 
-int amdgpu_fill_buffer(struct amdgpu_bo *bo,
-		uint32_t src_data,
-		struct reservation_object *resv,
-		struct fence **fence)
+int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data,
+		       struct reservation_object *resv,
+		       struct fence **fence)
 {
 	struct amdgpu_device *adev = bo->adev;
 	struct amdgpu_job *job;
@@ -1607,5 +1624,7 @@  static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
 
 u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
 {
-	return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
+	struct ttm_mem_global *glob = adev->mman.mem_global_ref.object;
+
+	return ttm_get_kernel_zone_memory_size(glob);
 }

Comments

On Thu, Sep 15, 2016 at 9:10 AM, Christian König
<deathsimple@vodafone.de> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> No intented functional change.

"intended"

In general, with respect to the whitespace changes, I think the code
is more readable as is, but I don't have a strong opinion either way
if you prefer this style. One additional comment below.

>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 65 +++++++++++++++++++++------------
>  1 file changed, 42 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 428ffb6..1965209 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -214,9 +214,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
>         switch (bo->mem.mem_type) {
>         case TTM_PL_VRAM:
>                 if (rbo->adev->mman.buffer_funcs_ring->ready == false) {
> -                       amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
> +                       amdgpu_ttm_placement_from_domain(rbo,
> +                                                        AMDGPU_GEM_DOMAIN_CPU);
>                 } else {
> -                       amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
> +                       amdgpu_ttm_placement_from_domain(rbo,
> +                                                        AMDGPU_GEM_DOMAIN_GTT);
>                         for (i = 0; i < rbo->placement.num_placement; ++i) {
>                                 if (!(rbo->placements[i].flags &
>                                       TTM_PL_FLAG_TT))
> @@ -524,7 +526,8 @@ memcpy:
>         return 0;
>  }
>
> -static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
> +static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
> +                                    struct ttm_mem_reg *mem)
>  {
>         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
>         struct amdgpu_device *adev = amdgpu_get_adev(bdev);
> @@ -545,7 +548,8 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
>         case TTM_PL_VRAM:
>                 mem->bus.offset = mem->start << PAGE_SHIFT;
>                 /* check if it's visible */
> -               if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
> +               if ((mem->bus.offset + mem->bus.size) >
> +                   adev->mc.visible_vram_size)
>                         return -EINVAL;
>                 mem->bus.base = adev->mc.aper_base;
>                 mem->bus.is_iomem = true;
> @@ -579,7 +583,8 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
>         return 0;
>  }
>
> -static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
> +static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
> +                                  struct ttm_mem_reg *mem)
>  {
>  }
>
> @@ -791,7 +796,8 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
>         bo_mem.mem_type = TTM_PL_TT;
>         spin_lock(&adev->gtt_list_lock);
>         list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
> -               flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
> +               flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm,
> +                                               &bo_mem);
>                 r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
>                                      gtt->ttm.ttm.pages, gtt->ttm.dma_address,
>                                      flags);
> @@ -856,7 +862,8 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
>         }
>         gtt->ttm.ttm.func = &amdgpu_backend_func;
>         gtt->adev = adev;
> -       if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
> +       if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
> +                           dummy_read_page)) {
>                 kfree(gtt);
>                 return NULL;
>         }
> @@ -887,7 +894,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
>
>         if (slave && ttm->sg) {
>                 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
> -                                                gtt->ttm.dma_address, ttm->num_pages);
> +                                                gtt->ttm.dma_address,
> +                                                ttm->num_pages);
>                 ttm->state = tt_unbound;
>                 return 0;
>         }
> @@ -906,13 +914,17 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
>         }
>
>         for (i = 0; i < ttm->num_pages; i++) {
> -               gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
> +               gtt->ttm.dma_address[i] = pci_map_page(adev->pdev,
> +                                                      ttm->pages[i],
>                                                        0, PAGE_SIZE,
>                                                        PCI_DMA_BIDIRECTIONAL);
> -               if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
> +               if (pci_dma_mapping_error(adev->pdev,
> +                                         gtt->ttm.dma_address[i])) {
>                         while (i--) {
> -                               pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
> -                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
> +                               pci_unmap_page(adev->pdev,
> +                                              gtt->ttm.dma_address[i],
> +                                              PAGE_SIZE,
> +                                              PCI_DMA_BIDIRECTIONAL);
>                                 gtt->ttm.dma_address[i] = 0;
>                         }
>                         ttm_pool_unpopulate(ttm);
> @@ -1195,14 +1207,20 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
>                  (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
>
>         adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
> -       adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
> -       adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
> +       adev->gds.mem.gfx_partition_size =
> +               adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
> +       adev->gds.mem.cs_partition_size =
> +               adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
>         adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
> -       adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
> -       adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
> +       adev->gds.gws.gfx_partition_size =
> +               adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
> +       adev->gds.gws.cs_partition_size =
> +               adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
>         adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
> -       adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
> -       adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
> +       adev->gds.oa.gfx_partition_size =
> +               adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
> +       adev->gds.oa.cs_partition_size =
> +               adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
>         /* GDS Memory */
>         r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
>                                 adev->gds.mem.total_size >> PAGE_SHIFT);
> @@ -1362,10 +1380,9 @@ error_free:
>         return r;
>  }
>
> -int amdgpu_fill_buffer(struct amdgpu_bo *bo,
> -               uint32_t src_data,
> -               struct reservation_object *resv,
> -               struct fence **fence)
> +int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data,
> +                      struct reservation_object *resv,
> +                      struct fence **fence)
>  {
>         struct amdgpu_device *adev = bo->adev;
>         struct amdgpu_job *job;
> @@ -1607,5 +1624,7 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
>
>  u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
>  {
> -       return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
> +       struct ttm_mem_global *glob = adev->mman.mem_global_ref.object;
> +
> +       return ttm_get_kernel_zone_memory_size(glob);


Was this change intended?  There were just a bunch of patches applied
that did the opposite.

Alex

>  }
> --
> 2.5.0
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
On 15/09/16 10:10 PM, Christian König wrote:
> From: Christian König <christian.koenig@amd.com>
> 
> No intented functional change.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 65 +++++++++++++++++++++------------
>  1 file changed, 42 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 428ffb6..1965209 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -214,9 +214,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
>  	switch (bo->mem.mem_type) {
>  	case TTM_PL_VRAM:
>  		if (rbo->adev->mman.buffer_funcs_ring->ready == false) {
> -			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
> +			amdgpu_ttm_placement_from_domain(rbo,
> +							 AMDGPU_GEM_DOMAIN_CPU);
>  		} else {
> -			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
> +			amdgpu_ttm_placement_from_domain(rbo,
> +							 AMDGPU_GEM_DOMAIN_GTT);

I don't see the point of this kind of change. We save all of 5 columns
of horizontal space, but for that we reduce readability by breaking up a
single-line statement into multiple lines.

It does make more sense when a wrap saves a more significant amount of
horizontal space, or when the statement already spans multiple lines.
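
To illustrate with two hunks from this very patch (just a sketch of the
distinction, not a hard rule) — the first wrap buys very little, while
the second keeps an already multi-line call tidy:

	/* Borderline: the wrap saves only a few columns and splits a
	 * statement that read fine on one line.
	 */
	amdgpu_ttm_placement_from_domain(rbo,
					 AMDGPU_GEM_DOMAIN_GTT);

	/* Worthwhile: the call already spans several lines, so grouping
	 * the arguments keeps every line well inside 80 columns.
	 */
	r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
			     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
			     flags);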
On 16.09.2016 at 04:17, Michel Dänzer wrote:
> On 15/09/16 10:10 PM, Christian König wrote:
>> From: Christian König <christian.koenig@amd.com>
>>
>> No intented functional change.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 65 +++++++++++++++++++++------------
>>   1 file changed, 42 insertions(+), 23 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> index 428ffb6..1965209 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> @@ -214,9 +214,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
>>   	switch (bo->mem.mem_type) {
>>   	case TTM_PL_VRAM:
>>   		if (rbo->adev->mman.buffer_funcs_ring->ready == false) {
>> -			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
>> +			amdgpu_ttm_placement_from_domain(rbo,
>> +							 AMDGPU_GEM_DOMAIN_CPU);
>>   		} else {
>> -			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
>> +			amdgpu_ttm_placement_from_domain(rbo,
>> +							 AMDGPU_GEM_DOMAIN_GTT);
> I don't see the point of this kind of change. We save all of 5 columns
> of horizontal space, but for that we reduce readability by breaking up a
> single-line statement into multiple lines.
>
> It does make more sense when a wrap saves a more significant amount of
> horizontal space, or when the statement already spans multiple lines.
>

I was mostly just following what the auto formatter suggested, but now
that you mention it, it's probably not such a good idea in some cases.

Should I just drop those two patches, or fix what Alex and you have
noted and send them again? Some of the changes still seem to make sense
to me.
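
For what it's worth, the long lines touched here are the sort of thing
that a plain

	./scripts/checkpatch.pl -f drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

run flags as "WARNING: line over 80 characters" (assuming a reasonably
current kernel tree).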

Regards,
Christian.