[v2,34/37] panfrost: Do fine-grained flushing when preparing BO for CPU accesses

Submitted by Boris Brezillon on Sept. 16, 2019, 9:37 a.m.

Details

Message ID 20190916093715.32203-35-boris.brezillon@collabora.com
State New
Headers show
Series "panfrost: Support batch pipelining" ( rev: 1 ) in Mesa

Not browsing as part of any series.

Commit Message

Boris Brezillon Sept. 16, 2019, 9:37 a.m.
We don't have to flush all batches when we're only interested in
reading/writing a specific BO. Thanks to the
panfrost_flush_batches_accessing_bo() and panfrost_bo_wait() helpers
we can now flush only the batches touching the BO we want to access
from the CPU.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
---
 src/gallium/drivers/panfrost/pan_resource.c | 27 +++++++++------------
 1 file changed, 12 insertions(+), 15 deletions(-)

Patch: hide | download patch | download mbox

diff --git a/src/gallium/drivers/panfrost/pan_resource.c b/src/gallium/drivers/panfrost/pan_resource.c
index 1f7605adcd5d..d59529ff15b7 100644
--- a/src/gallium/drivers/panfrost/pan_resource.c
+++ b/src/gallium/drivers/panfrost/pan_resource.c
@@ -578,10 +578,8 @@  panfrost_transfer_map(struct pipe_context *pctx,
                         is_bound |= fb->cbufs[c]->texture == resource;
         }
 
-        if (is_bound && (usage & PIPE_TRANSFER_READ)) {
-                assert(level == 0);
-                panfrost_flush_all_batches(ctx, true);
-        }
+        if (is_bound && (usage & PIPE_TRANSFER_READ))
+                 assert(level == 0);
 
         /* TODO: Respect usage flags */
 
@@ -594,11 +592,11 @@  panfrost_transfer_map(struct pipe_context *pctx,
                 /* No flush for writes to uninitialized */
         } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
                 if (usage & PIPE_TRANSFER_WRITE) {
-                        /* STUB: flush reading */
-                        //printf("debug: missed reading flush %d\n", resource->target);
+                        panfrost_flush_batches_accessing_bo(ctx, bo, PAN_BO_GPU_ACCESS_RW);
+                        panfrost_bo_wait(bo, INT64_MAX, PAN_BO_GPU_ACCESS_RW);
                 } else if (usage & PIPE_TRANSFER_READ) {
-                        /* STUB: flush writing */
-                        //printf("debug: missed writing flush %d (%d-%d)\n", resource->target, box->x, box->x + box->width);
+                        panfrost_flush_batches_accessing_bo(ctx, bo, PAN_BO_GPU_ACCESS_WRITE);
+                        panfrost_bo_wait(bo, INT64_MAX, PAN_BO_GPU_ACCESS_WRITE);
                 } else {
                         /* Why are you even mapping?! */
                 }
@@ -748,11 +746,8 @@  panfrost_generate_mipmap(
          * reorder-type optimizations in place. But for now prioritize
          * correctness. */
 
-        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
-        bool has_draws = batch->last_job.gpu;
-
-        if (has_draws)
-                panfrost_flush_all_batches(ctx, true);
+        panfrost_flush_batches_accessing_bo(ctx, rsrc->bo, PAN_BO_GPU_ACCESS_RW);
+        panfrost_bo_wait(rsrc->bo, INT64_MAX, PAN_BO_GPU_ACCESS_RW);
 
         /* We've flushed the original buffer if needed, now trigger a blit */
 
@@ -765,8 +760,10 @@  panfrost_generate_mipmap(
         /* If the blit was successful, flush once more. If it wasn't, well, let
          * the state tracker deal with it. */
 
-        if (blit_res)
-                panfrost_flush_all_batches(ctx, true);
+        if (blit_res) {
+                panfrost_flush_batches_accessing_bo(ctx, rsrc->bo, PAN_BO_GPU_ACCESS_WRITE);
+                panfrost_bo_wait(rsrc->bo, INT64_MAX, PAN_BO_GPU_ACCESS_WRITE);
+        }
 
         return blit_res;
 }

Comments

R-b (Reviewed-by)

On Mon, Sep 16, 2019 at 11:37:12AM +0200, Boris Brezillon wrote:
> We don't have to flush all batches when we're only interested in
> reading/writing a specific BO. Thanks to the
> panfrost_flush_batches_accessing_bo() and panfrost_bo_wait() helpers
> we can now flush only the batches touching the BO we want to access
> from the CPU.
> 
> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
> ---
>  src/gallium/drivers/panfrost/pan_resource.c | 27 +++++++++------------
>  1 file changed, 12 insertions(+), 15 deletions(-)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_resource.c b/src/gallium/drivers/panfrost/pan_resource.c
> index 1f7605adcd5d..d59529ff15b7 100644
> --- a/src/gallium/drivers/panfrost/pan_resource.c
> +++ b/src/gallium/drivers/panfrost/pan_resource.c
> @@ -578,10 +578,8 @@ panfrost_transfer_map(struct pipe_context *pctx,
>                          is_bound |= fb->cbufs[c]->texture == resource;
>          }
>  
> -        if (is_bound && (usage & PIPE_TRANSFER_READ)) {
> -                assert(level == 0);
> -                panfrost_flush_all_batches(ctx, true);
> -        }
> +        if (is_bound && (usage & PIPE_TRANSFER_READ))
> +                 assert(level == 0);
>  
>          /* TODO: Respect usage flags */
>  
> @@ -594,11 +592,11 @@ panfrost_transfer_map(struct pipe_context *pctx,
>                  /* No flush for writes to uninitialized */
>          } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
>                  if (usage & PIPE_TRANSFER_WRITE) {
> -                        /* STUB: flush reading */
> -                        //printf("debug: missed reading flush %d\n", resource->target);
> +                        panfrost_flush_batches_accessing_bo(ctx, bo, PAN_BO_GPU_ACCESS_RW);
> +                        panfrost_bo_wait(bo, INT64_MAX, PAN_BO_GPU_ACCESS_RW);
>                  } else if (usage & PIPE_TRANSFER_READ) {
> -                        /* STUB: flush writing */
> -                        //printf("debug: missed writing flush %d (%d-%d)\n", resource->target, box->x, box->x + box->width);
> +                        panfrost_flush_batches_accessing_bo(ctx, bo, PAN_BO_GPU_ACCESS_WRITE);
> +                        panfrost_bo_wait(bo, INT64_MAX, PAN_BO_GPU_ACCESS_WRITE);
>                  } else {
>                          /* Why are you even mapping?! */
>                  }
> @@ -748,11 +746,8 @@ panfrost_generate_mipmap(
>           * reorder-type optimizations in place. But for now prioritize
>           * correctness. */
>  
> -        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
> -        bool has_draws = batch->last_job.gpu;
> -
> -        if (has_draws)
> -                panfrost_flush_all_batches(ctx, true);
> +        panfrost_flush_batches_accessing_bo(ctx, rsrc->bo, PAN_BO_GPU_ACCESS_RW);
> +        panfrost_bo_wait(rsrc->bo, INT64_MAX, PAN_BO_GPU_ACCESS_RW);
>  
>          /* We've flushed the original buffer if needed, now trigger a blit */
>  
> @@ -765,8 +760,10 @@ panfrost_generate_mipmap(
>          /* If the blit was successful, flush once more. If it wasn't, well, let
>           * the state tracker deal with it. */
>  
> -        if (blit_res)
> -                panfrost_flush_all_batches(ctx, true);
> +        if (blit_res) {
> +                panfrost_flush_batches_accessing_bo(ctx, rsrc->bo, PAN_BO_GPU_ACCESS_WRITE);
> +                panfrost_bo_wait(rsrc->bo, INT64_MAX, PAN_BO_GPU_ACCESS_WRITE);
> +        }
>  
>          return blit_res;
>  }
> -- 
> 2.21.0