[v2,1/2] spirv/nir: handle memory access qualifiers for SSBO loads/stores

Submitted by Samuel Pitoiset on Oct. 9, 2018, 3:07 p.m.

Details

Message ID: 20181009150731.29402-1-samuel.pitoiset@gmail.com
State: New
Series: "Series without cover letter" (rev 2) in Mesa


Commit Message

Samuel Pitoiset Oct. 9, 2018, 3:07 p.m.
v2: change how the access qualifiers are accumulated

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
---
 src/compiler/nir/nir_intrinsics.py |  4 +--
 src/compiler/spirv/spirv_to_nir.c  | 14 +++++++--
 src/compiler/spirv/vtn_private.h   |  6 ++++
 src/compiler/spirv/vtn_variables.c | 48 ++++++++++++++++++++++--------
 4 files changed, 55 insertions(+), 17 deletions(-)


diff --git a/src/compiler/nir/nir_intrinsics.py b/src/compiler/nir/nir_intrinsics.py
index b06b38fc2c..ec3049ca06 100644
--- a/src/compiler/nir/nir_intrinsics.py
+++ b/src/compiler/nir/nir_intrinsics.py
@@ -565,7 +565,7 @@  intrinsic("load_interpolated_input", src_comp=[2, 1], dest_comp=0,
           indices=[BASE, COMPONENT], flags=[CAN_ELIMINATE, CAN_REORDER])
 
 # src[] = { buffer_index, offset }. No const_index
-load("ssbo", 2, flags=[CAN_ELIMINATE])
+load("ssbo", 2, flags=[CAN_ELIMINATE], indices=[ACCESS])
 # src[] = { offset }. const_index[] = { base, component }
 load("output", 1, [BASE, COMPONENT], flags=[CAN_ELIMINATE])
 # src[] = { vertex, offset }. const_index[] = { base }
@@ -591,6 +591,6 @@  store("output", 2, [BASE, WRMASK, COMPONENT])
 # const_index[] = { base, write_mask, component }
 store("per_vertex_output", 3, [BASE, WRMASK, COMPONENT])
 # src[] = { value, block_index, offset }. const_index[] = { write_mask }
-store("ssbo", 3, [WRMASK])
+store("ssbo", 3, [WRMASK, ACCESS])
 # src[] = { value, offset }. const_index[] = { base, write_mask }
 store("shared", 2, [BASE, WRMASK])
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index 2ad83196e4..87017ca500 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -681,13 +681,21 @@  struct_member_decoration_cb(struct vtn_builder *b,
    assert(member < ctx->num_fields);
 
    switch (dec->decoration) {
+   case SpvDecorationRelaxedPrecision:
+   case SpvDecorationUniform:
+      break; /* FIXME: Do nothing with this for now. */
    case SpvDecorationNonWritable:
+      ctx->type->members[member]->access |= ACCESS_NON_WRITEABLE;
+      break;
    case SpvDecorationNonReadable:
-   case SpvDecorationRelaxedPrecision:
+      ctx->type->members[member]->access |= ACCESS_NON_READABLE;
+      break;
    case SpvDecorationVolatile:
+      ctx->type->members[member]->access |= ACCESS_VOLATILE;
+      break;
    case SpvDecorationCoherent:
-   case SpvDecorationUniform:
-      break; /* FIXME: Do nothing with this for now. */
+      ctx->type->members[member]->access |= ACCESS_COHERENT;
+      break;
    case SpvDecorationNoPerspective:
       ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
       break;
diff --git a/src/compiler/spirv/vtn_private.h b/src/compiler/spirv/vtn_private.h
index a31202d129..62fb972a0c 100644
--- a/src/compiler/spirv/vtn_private.h
+++ b/src/compiler/spirv/vtn_private.h
@@ -296,6 +296,9 @@  struct vtn_type {
    /* for arrays, matrices and pointers, the array stride */
    unsigned stride;
 
+   /* Access qualifiers */
+   enum gl_access_qualifier access;
+
    union {
       /* Members for scalar, vector, and array-like types */
       struct {
@@ -457,6 +460,9 @@  struct vtn_pointer {
    /** A (block_index, offset) pair representing a UBO or SSBO position. */
    struct nir_ssa_def *block_index;
    struct nir_ssa_def *offset;
+
+   /* Access qualifiers */
+   enum gl_access_qualifier access;
 };
 
 struct vtn_variable {
diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c
index 636fdb8689..be22f25422 100644
--- a/src/compiler/spirv/vtn_variables.c
+++ b/src/compiler/spirv/vtn_variables.c
@@ -89,6 +89,7 @@  vtn_access_chain_pointer_dereference(struct vtn_builder *b,
    struct vtn_access_chain *chain =
       vtn_access_chain_extend(b, base->chain, deref_chain->length);
    struct vtn_type *type = base->type;
+   enum gl_access_qualifier access = base->access;
 
    /* OpPtrAccessChain is only allowed on things which support variable
     * pointers.  For everything else, the client is expected to just pass us
@@ -106,6 +107,8 @@  vtn_access_chain_pointer_dereference(struct vtn_builder *b,
       } else {
          type = type->array_element;
       }
+
+      access |= type->access;
    }
 
    struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
@@ -114,6 +117,7 @@  vtn_access_chain_pointer_dereference(struct vtn_builder *b,
    ptr->var = base->var;
    ptr->deref = base->deref;
    ptr->chain = chain;
+   ptr->access = access;
 
    return ptr;
 }
@@ -184,6 +188,7 @@  vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
    nir_ssa_def *block_index = base->block_index;
    nir_ssa_def *offset = base->offset;
    struct vtn_type *type = base->type;
+   enum gl_access_qualifier access = base->access;
 
    unsigned idx = 0;
    if (base->mode == vtn_variable_mode_ubo ||
@@ -198,6 +203,7 @@  vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
                idx++;
                /* This consumes a level of type */
                type = type->array_element;
+               access |= type->access;
             } else {
                /* This is annoying.  We've been asked for a pointer to the
                 * array of UBOs/SSBOs and not a specifc buffer.  Return a
@@ -319,6 +325,7 @@  vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
             vtn_access_link_as_ssa(b, deref_chain->link[idx], type->stride);
          offset = nir_iadd(&b->nb, offset, elem_offset);
          type = type->array_element;
+         access |= type->access;
          break;
       }
 
@@ -328,6 +335,7 @@  vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
          nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]);
          offset = nir_iadd(&b->nb, offset, mem_offset);
          type = type->members[member];
+         access |= type->access;
          break;
       }
 
@@ -341,6 +349,7 @@  vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
    ptr->type = type;
    ptr->block_index = block_index;
    ptr->offset = offset;
+   ptr->access = access;
 
    return ptr;
 }
@@ -608,7 +617,8 @@  static void
 _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                      nir_ssa_def *index, nir_ssa_def *offset,
                      unsigned access_offset, unsigned access_size,
-                     struct vtn_ssa_value **inout, const struct glsl_type *type)
+                     struct vtn_ssa_value **inout, const struct glsl_type *type,
+                     enum gl_access_qualifier access)
 {
    nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
    instr->num_components = glsl_get_vector_elements(type);
@@ -624,6 +634,11 @@  _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
       nir_intrinsic_set_range(instr, access_size);
    }
 
+   if (op == nir_intrinsic_load_ssbo ||
+       op == nir_intrinsic_store_ssbo) {
+      nir_intrinsic_set_access(instr, access);
+   }
+
    if (index)
       instr->src[src++] = nir_src_for_ssa(index);
 
@@ -654,7 +669,8 @@  static void
 _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                       nir_ssa_def *index, nir_ssa_def *offset,
                       unsigned access_offset, unsigned access_size,
-                      struct vtn_type *type, struct vtn_ssa_value **inout)
+                      struct vtn_type *type, enum gl_access_qualifier access,
+                      struct vtn_ssa_value **inout)
 {
    if (load && *inout == NULL)
       *inout = vtn_create_ssa_value(b, type->type);
@@ -704,7 +720,8 @@  _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
             _vtn_load_store_tail(b, op, load, index, elem_offset,
                                  access_offset, access_size,
                                  &(*inout)->elems[i],
-                                 glsl_vector_type(base_type, vec_width));
+                                 glsl_vector_type(base_type, vec_width),
+                                 access | type->access);
          }
 
          if (load && type->row_major)
@@ -717,7 +734,7 @@  _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
             vtn_assert(glsl_type_is_vector_or_scalar(type->type));
             _vtn_load_store_tail(b, op, load, index, offset,
                                  access_offset, access_size,
-                                 inout, type->type);
+                                 inout, type->type, access | type->access);
          } else {
             /* This is a strided load.  We have to load N things separately.
              * This is the single column of a row-major matrix case.
@@ -738,7 +755,8 @@  _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
                comp = &temp_val;
                _vtn_load_store_tail(b, op, load, index, elem_offset,
                                     access_offset, access_size,
-                                    &comp, glsl_scalar_type(base_type));
+                                    &comp, glsl_scalar_type(base_type),
+                                    access | type->access);
                per_comp[i] = comp->def;
             }
 
@@ -758,7 +776,9 @@  _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
             nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
          _vtn_block_load_store(b, op, load, index, elem_off,
                                access_offset, access_size,
-                               type->array_element, &(*inout)->elems[i]);
+                               type->array_element,
+                               access | type->access,
+                               &(*inout)->elems[i]);
       }
       return;
    }
@@ -770,7 +790,9 @@  _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
             nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
          _vtn_block_load_store(b, op, load, index, elem_off,
                                access_offset, access_size,
-                               type->members[i], &(*inout)->elems[i]);
+                               type->members[i],
+                               access | type->members[i]->access,
+                               &(*inout)->elems[i]);
       }
       return;
    }
@@ -809,7 +831,7 @@  vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src)
    struct vtn_ssa_value *value = NULL;
    _vtn_block_load_store(b, op, true, index, offset,
                          access_offset, access_size,
-                         src->type, &value);
+                         src->type, src->access, &value);
    return value;
 }
 
@@ -833,12 +855,13 @@  vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
    offset = vtn_pointer_to_offset(b, dst, &index);
 
    _vtn_block_load_store(b, op, false, index, offset,
-                         0, 0, dst->type, &src);
+                         0, 0, dst->type, dst->access, &src);
 }
 
 static void
 _vtn_variable_load_store(struct vtn_builder *b, bool load,
                          struct vtn_pointer *ptr,
+                         enum gl_access_qualifier access,
                          struct vtn_ssa_value **inout)
 {
    enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
@@ -887,7 +910,8 @@  _vtn_variable_load_store(struct vtn_builder *b, bool load,
       for (unsigned i = 0; i < elems; i++) {
          chain.link[0].id = i;
          struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
-         _vtn_variable_load_store(b, load, elem, &(*inout)->elems[i]);
+         _vtn_variable_load_store(b, load, elem, access | elem->access,
+                                  &(*inout)->elems[i]);
       }
       return;
    }
@@ -904,7 +928,7 @@  vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
       return vtn_block_load(b, src);
    } else {
       struct vtn_ssa_value *val = NULL;
-      _vtn_variable_load_store(b, true, src, &val);
+      _vtn_variable_load_store(b, true, src, src->access, &val);
       return val;
    }
 }
@@ -918,7 +942,7 @@  vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                  dest->mode == vtn_variable_mode_workgroup);
       vtn_block_store(b, src, dest);
    } else {
-      _vtn_variable_load_store(b, false, dest, &src);
+      _vtn_variable_load_store(b, false, dest, dest->access, &src);
    }
 }
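
With the ACCESS index in place on load_ssbo/store_ssbo, NIR consumers can query the qualifiers straight off the intrinsic. A minimal sketch of the kind of check a backend or optimization pass might do (the helper below is illustrative, not part of this patch; nir_intrinsic_access() is the accessor that nir_intrinsics.py generates for indices=[ACCESS]):

static bool
ssbo_load_is_reorderable(const nir_intrinsic_instr *intrin)
{
   if (intrin->intrinsic != nir_intrinsic_load_ssbo)
      return false;

   enum gl_access_qualifier access = nir_intrinsic_access(intrin);

   /* Volatile or coherent loads must not be combined with or moved
    * across other memory accesses; plain loads are fair game. */
   return !(access & (ACCESS_VOLATILE | ACCESS_COHERENT));
}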
 

Comments

On Tue, Oct 9, 2018 at 10:05 AM Samuel Pitoiset <samuel.pitoiset@gmail.com> wrote:

> [...]
> diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
> index 2ad83196e4..87017ca500 100644
> --- a/src/compiler/spirv/spirv_to_nir.c
> +++ b/src/compiler/spirv/spirv_to_nir.c
> @@ -681,13 +681,21 @@ struct_member_decoration_cb(struct vtn_builder *b,
>     assert(member < ctx->num_fields);
>
>     switch (dec->decoration) {
> +   case SpvDecorationRelaxedPrecision:
> +   case SpvDecorationUniform:
> +      break; /* FIXME: Do nothing with this for now. */
>     case SpvDecorationNonWritable:
> +      ctx->type->members[member]->access |= ACCESS_NON_WRITEABLE;
> +      break;
>     case SpvDecorationNonReadable:
> -   case SpvDecorationRelaxedPrecision:
> +      ctx->type->members[member]->access |= ACCESS_NON_READABLE;
> +      break;
>     case SpvDecorationVolatile:
> +      ctx->type->members[member]->access |= ACCESS_VOLATILE;
> +      break;
>     case SpvDecorationCoherent:
> -   case SpvDecorationUniform:
> -      break; /* FIXME: Do nothing with this for now. */
> +      ctx->type->members[member]->access |= ACCESS_COHERENT;
>

We really should duplicate the types prior to adding access qualifiers.
Otherwise, instances of that type which aren't part of this struct may
accidentally get them.  I'm not 100% sure how we want to do this.  One
option would be to add something to the member_decoration_ctx that stores
access qualifiers and a row_major boolean and fill it out in
struct_member_decoration_cb and then afterwards walk the members and only
duplicate if needed.  Another option would be to just duplicate all the
time and not care about the wasted memory.  Given how few UBO/SSBO struct
types you're likely to have in a given SPIR-V module, I doubt that either
solution will really have all that much cost.  Do whatever's easiest.

In other news, I really hate OpMemberDecorate.
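
A minimal sketch of the "duplicate all the time" option: shallow-copy the member's vtn_type before attaching qualifiers, so other instances of that type don't pick them up. vtn_type_dup() is a hypothetical helper, not an existing vtn function:

static struct vtn_type *
vtn_type_dup(struct vtn_builder *b, const struct vtn_type *src)
{
   /* Shallow copy: members/array_element pointers stay shared, which
    * is fine here since only the copy's "access" field gets mutated. */
   struct vtn_type *dup = rzalloc(b, struct vtn_type);
   *dup = *src;
   return dup;
}

/* Then, in struct_member_decoration_cb, before ORing in ACCESS_* bits:
 *
 *    ctx->type->members[member] =
 *       vtn_type_dup(b, ctx->type->members[member]);
 */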


> +      break;
>     case SpvDecorationNoPerspective:
>        ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
>        break;
> [...]

Not sure where to put this comment so it's going here:

The SPIR-V spec also allows those decorations to show up on the OpVariable,
so we also need to do a few more things (oh, if only this were all
tested...):

 1) Add an access qualifier field to vtn_variable.
 2) Handle access qualifiers in var_decoration_cb.
 3) In vtn_pointer_for_variable, set the access qualifiers of the pointer
to var->access | var->type->access.  I think technically only var->access
is sufficient because you can't apply access qualifiers to just any type;
it has to be a pointer or a struct member.  Still, it doesn't hurt to be
thorough.

I'm sorry this is all so painful.  Welcome to SPIR-V decorations...
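
A rough sketch of those three steps (untested; the exact shape is an assumption, though var_decoration_cb and vtn_pointer_for_variable are the existing functions in vtn_variables.c):

/* 1) In struct vtn_variable (vtn_private.h):
 *
 *       enum gl_access_qualifier access;
 */

/* 2) In var_decoration_cb, mirroring the struct-member handling: */
case SpvDecorationCoherent:
   vtn_var->access |= ACCESS_COHERENT;
   break;
case SpvDecorationVolatile:
   vtn_var->access |= ACCESS_VOLATILE;
   break;
/* ...SpvDecorationNonWritable/NonReadable likewise... */

/* 3) In vtn_pointer_for_variable: */
pointer->access = var->access | var->type->access;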


> [...]
> @@ -758,7 +776,9 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
>              nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
>           _vtn_block_load_store(b, op, load, index, elem_off,
>                                 access_offset, access_size,
> -                               type->array_element, &(*inout)->elems[i]);
> +                               type->array_element,
> +                               access | type->access,
>

type->array_element->access?
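
That is, presumably the recursion should OR in the element type's own qualifiers rather than the parent array's. A sketch of the suggested call:

   _vtn_block_load_store(b, op, load, index, elem_off,
                         access_offset, access_size,
                         type->array_element,
                         access | type->array_element->access,
                         &(*inout)->elems[i]);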


> +                               &(*inout)->elems[i]);
>        }
>        return;
>     }
> [...]
>  static void
>  _vtn_variable_load_store(struct vtn_builder *b, bool load,
>                           struct vtn_pointer *ptr,
> +                         enum gl_access_qualifier access,
>                           struct vtn_ssa_value **inout)
>  {
>     enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
> @@ -887,7 +910,8 @@ _vtn_variable_load_store(struct vtn_builder *b, bool load,
>        for (unsigned i = 0; i < elems; i++) {
>           chain.link[0].id = i;
>           struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
> -         _vtn_variable_load_store(b, load, elem, &(*inout)->elems[i]);
> +         _vtn_variable_load_store(b, load, elem, access | elem->access,
> +                                  &(*inout)->elems[i]);
>

In this one, I think vtn_pointer_dereference will sort out access
qualifiers for you so you don't need to do it explicitly.  Sorry for making
a silly suggestion without first reading the code. :-(

>        }
>        return;
>     }
> @@ -904,7 +928,7 @@ vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
>        return vtn_block_load(b, src);
>     } else {
>        struct vtn_ssa_value *val = NULL;
> -      _vtn_variable_load_store(b, true, src, &val);
> +      _vtn_variable_load_store(b, true, src, src->access, &val);
>

This


>        return val;
>     }
>  }
> @@ -918,7 +942,7 @@ vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
>                   dest->mode == vtn_variable_mode_workgroup);
>        vtn_block_store(b, src, dest);
>     } else {
> -      _vtn_variable_load_store(b, false, dest, &src);
> +      _vtn_variable_load_store(b, false, dest, dest->access, &src);
>

and this can be cropped as well.


>     }
>  }
>
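
Taken together, the suggestion is that the explicit access parameter can be dropped from _vtn_variable_load_store entirely and derived inside the function, since vtn_pointer_dereference already accumulates the qualifiers into ptr->access. A sketch of that simplification (assuming ptr->access is populated on every path that builds a pointer):

static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
                         struct vtn_pointer *ptr,
                         struct vtn_ssa_value **inout)
{
   /* The pointer-building paths (vtn_*_pointer_dereference) have
    * already ORed the relevant qualifiers into ptr->access. */
   enum gl_access_qualifier access = ptr->access;
   /* ...body otherwise unchanged, using "access" where needed... */
}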