drm/i915/gvt: only copy the first page for restore inhibit context

Submitted by Zhao, Yan Y on July 25, 2018, 6:42 a.m.

Details

Message ID: 20180725064245.17458-1-yan.y.zhao@intel.com
State: New
Series "drm/i915/gvt: only copy the first page for restore inhibit context" ( rev: 1 ) in Intel GVT devel


Commit Message

Zhao, Yan Y July 25, 2018, 6:42 a.m.
If a context is a restore inhibit context, the gfx hardware only loads
the first page of the ring context, so we only need to copy that one
page from the guest as well.

v2: move the restore inhibit judgement into a macro in mmio_context.h

Signed-off-by: Zhao Yan <yan.y.zhao@intel.com>
---
 drivers/gpu/drm/i915/gvt/mmio_context.h |  3 ++
 drivers/gpu/drm/i915/gvt/scheduler.c    | 61 +++++++++++++------------
 2 files changed, 35 insertions(+), 29 deletions(-)


diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 5c3b9ff9f96a..f7eaa442403f 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -53,5 +53,8 @@  bool is_inhibit_context(struct intel_context *ce);
 
 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
 				       struct i915_request *req);
+#define IS_RESTORE_INHIBIT(a)	\
+	(_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
+	((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
 
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1ead1ccd574a..84a817a8abc2 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -132,35 +132,6 @@  static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	unsigned long context_gpa, context_page_num;
 	int i;
 
-	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
-			workload->ctx_desc.lrca);
-
-	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
-
-	context_page_num = context_page_num >> PAGE_SHIFT;
-
-	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
-		context_page_num = 19;
-
-	i = 2;
-
-	while (i < context_page_num) {
-		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
-				(u32)((workload->ctx_desc.lrca + i) <<
-				I915_GTT_PAGE_SHIFT));
-		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
-			gvt_vgpu_err("Invalid guest context descriptor\n");
-			return -EFAULT;
-		}
-
-		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
-		dst = kmap(page);
-		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
-				I915_GTT_PAGE_SIZE);
-		kunmap(page);
-		i++;
-	}
-
 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 	shadow_ring_context = kmap(page);
 
@@ -195,6 +166,38 @@  static int populate_shadow_context(struct intel_vgpu_workload *workload)
 
 	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
 	kunmap(page);
+
+	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
+		goto inhibit;
+
+	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
+			workload->ctx_desc.lrca);
+
+	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+
+	context_page_num = context_page_num >> PAGE_SHIFT;
+
+	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+		context_page_num = 19;
+
+	i = 2;
+	while (i < context_page_num) {
+		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+				(u32)((workload->ctx_desc.lrca + i) <<
+				I915_GTT_PAGE_SHIFT));
+		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+			gvt_vgpu_err("Invalid guest context descriptor\n");
+			return -EFAULT;
+		}
+
+		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+		dst = kmap(page);
+		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+				I915_GTT_PAGE_SIZE);
+		kunmap(page);
+		i++;
+	}
+inhibit:
 	return 0;
 }
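
The IS_RESTORE_INHIBIT() macro above relies on the masked-register
convention used for CTX_CONTEXT_CONTROL: in a masked write, bits 31:16
select which of the value bits 15:0 take effect, so "restore inhibit is
set" means both the mask bit and the value bit are present in
ctx_ctrl.val. Below is a minimal standalone sketch of that test (the
bit position and the simplified MASKED_BIT_ENABLE() are assumptions
mirroring the i915 headers, not the driver's actual definitions):

	#include <stdint.h>
	#include <stdio.h>

	/* assumed: restore inhibit is bit 0 of CTX_CONTEXT_CONTROL */
	#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1u << 0)
	/* mirrors i915's _MASKED_BIT_ENABLE(): mask in bits 31:16, value in 15:0 */
	#define MASKED_BIT_ENABLE(b)	(((b) << 16) | (b))

	static int is_restore_inhibit(uint32_t ctx_ctrl)
	{
		uint32_t m = MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

		/* both the mask bit and the value bit must be set */
		return (ctx_ctrl & m) == m;
	}

	int main(void)
	{
		printf("%d\n", is_restore_inhibit(0x00010001)); /* 1: mask + value */
		printf("%d\n", is_restore_inhibit(0x00000001)); /* 0: value, no mask */
		return 0;
	}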
 

Comments


On 2018.07.25 02:42:45 -0400, Zhao Yan wrote:
> If a context is a restore inhibit context, the gfx hardware only loads
> the first page of the ring context, so we only need to copy that one
> page from the guest as well.
> 
> v2: move the restore inhibit judgement into a macro in mmio_context.h
> 
> Signed-off-by: Zhao Yan <yan.y.zhao@intel.com>
> ---
>  drivers/gpu/drm/i915/gvt/mmio_context.h |  3 ++
>  drivers/gpu/drm/i915/gvt/scheduler.c    | 61 +++++++++++++------------
>  2 files changed, 35 insertions(+), 29 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
> index 5c3b9ff9f96a..f7eaa442403f 100644
> --- a/drivers/gpu/drm/i915/gvt/mmio_context.h
> +++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
> @@ -53,5 +53,8 @@ bool is_inhibit_context(struct intel_context *ce);
>  
>  int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
>  				       struct i915_request *req);
> +#define IS_RESTORE_INHIBIT(a)	\
> +	(_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
> +	((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
>  
>  #endif
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index 1ead1ccd574a..84a817a8abc2 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -132,35 +132,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
>  	unsigned long context_gpa, context_page_num;
>  	int i;
>  
> -	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
> -			workload->ctx_desc.lrca);
> -
> -	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
> -
> -	context_page_num = context_page_num >> PAGE_SHIFT;
> -
> -	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
> -		context_page_num = 19;
> -
> -	i = 2;
> -
> -	while (i < context_page_num) {
> -		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
> -				(u32)((workload->ctx_desc.lrca + i) <<
> -				I915_GTT_PAGE_SHIFT));
> -		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
> -			gvt_vgpu_err("Invalid guest context descriptor\n");
> -			return -EFAULT;
> -		}
> -
> -		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
> -		dst = kmap(page);
> -		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
> -				I915_GTT_PAGE_SIZE);
> -		kunmap(page);
> -		i++;
> -	}
> -
>  	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
>  	shadow_ring_context = kmap(page);
>  
> @@ -195,6 +166,38 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
>  
>  	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
>  	kunmap(page);
> +
> +	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
> +		goto inhibit;

Just return here, no need for the extra goto.
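
For illustration, the suggested simplification would look like this (an
editor's sketch of the review comment, untested):

	/* hw only loads the first page for a restore inhibit context,
	 * so the remaining pages need not be copied: return directly.
	 */
	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
		return 0;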

> +
> +	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
> +			workload->ctx_desc.lrca);
> +
> +	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
> +
> +	context_page_num = context_page_num >> PAGE_SHIFT;
> +
> +	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
> +		context_page_num = 19;
> +
> +	i = 2;
> +	while (i < context_page_num) {
> +		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
> +				(u32)((workload->ctx_desc.lrca + i) <<
> +				I915_GTT_PAGE_SHIFT));
> +		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
> +			gvt_vgpu_err("Invalid guest context descriptor\n");
> +			return -EFAULT;
> +		}
> +
> +		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
> +		dst = kmap(page);
> +		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
> +				I915_GTT_PAGE_SIZE);
> +		kunmap(page);
> +		i++;
> +	}
> +inhibit:
>  	return 0;
>  }
>  
> -- 
> 2.17.1