[v3] drm/i915/gvt: only copy the first page for restore inhibit context

Submitted by Zhao, Yan Y on Aug. 1, 2018, 4:15 a.m.

Details

Message ID 20180801041531.12388-1-yan.y.zhao@intel.com
State New
Series "drm/i915/gvt: only copy the first page for restore inhibit context" ( rev: 2 ) in Intel GVT devel


Commit Message

Zhao, Yan Y Aug. 1, 2018, 4:15 a.m.
If a context is a restore-inhibit context, the GFX hardware only loads the
first page of the ring context, so we only need to copy that single page
from the guest as well.

v3: use "return" instead of "goto" for the inhibit case. (Zhenyu Wang)
v2: move the restore-inhibit check into a macro in mmio_context.h

Signed-off-by: Zhao Yan <yan.y.zhao@intel.com>
---
 drivers/gpu/drm/i915/gvt/mmio_context.h |  3 ++
 drivers/gpu/drm/i915/gvt/scheduler.c    | 60 +++++++++++++------------
 2 files changed, 34 insertions(+), 29 deletions(-)


diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 5c3b9ff9f96a..f7eaa442403f 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -53,5 +53,8 @@  bool is_inhibit_context(struct intel_context *ce);
 
 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
 				       struct i915_request *req);
+#define IS_RESTORE_INHIBIT(a)	\
+	(_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
+	((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
 
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b0e566956b8d..8087354317c2 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -132,35 +132,6 @@  static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	unsigned long context_gpa, context_page_num;
 	int i;
 
-	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
-			workload->ctx_desc.lrca);
-
-	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
-
-	context_page_num = context_page_num >> PAGE_SHIFT;
-
-	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
-		context_page_num = 19;
-
-	i = 2;
-
-	while (i < context_page_num) {
-		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
-				(u32)((workload->ctx_desc.lrca + i) <<
-				I915_GTT_PAGE_SHIFT));
-		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
-			gvt_vgpu_err("Invalid guest context descriptor\n");
-			return -EFAULT;
-		}
-
-		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
-		dst = kmap(page);
-		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
-				I915_GTT_PAGE_SIZE);
-		kunmap(page);
-		i++;
-	}
-
 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 	shadow_ring_context = kmap(page);
 
@@ -195,6 +166,37 @@  static int populate_shadow_context(struct intel_vgpu_workload *workload)
 
 	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
 	kunmap(page);
+
+	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
+		return 0;
+
+	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
+			workload->ctx_desc.lrca);
+
+	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+
+	context_page_num = context_page_num >> PAGE_SHIFT;
+
+	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+		context_page_num = 19;
+
+	i = 2;
+	while (i < context_page_num) {
+		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+				(u32)((workload->ctx_desc.lrca + i) <<
+				I915_GTT_PAGE_SHIFT));
+		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+			gvt_vgpu_err("Invalid guest context descriptor\n");
+			return -EFAULT;
+		}
+
+		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+		dst = kmap(page);
+		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+				I915_GTT_PAGE_SIZE);
+		kunmap(page);
+		i++;
+	}
 	return 0;
 }
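
A note for readers on how the IS_RESTORE_INHIBIT() test works: i915 "masked"
registers carry a write-enable mask in bits 31:16 and the corresponding values
in bits 15:0, and _MASKED_BIT_ENABLE(a) expands to roughly ((a) << 16 | (a)).
The macro therefore checks that both the mask bit and the value bit for
CTX_RESTORE_INHIBIT are set in the guest's CTX_CONTEXT_CONTROL image. Below is
a minimal, self-contained userspace sketch of that check (not part of the
patch; the bit position is assumed for illustration):

/* build: cc -o inhibit inhibit.c */
#include <stdint.h>
#include <stdio.h>

#define CTX_RESTORE_INHIBIT	(1u << 0)	/* assumed bit position */
#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))

static int is_restore_inhibit(uint32_t ctx_ctrl)
{
	uint32_t m = MASKED_BIT_ENABLE(CTX_RESTORE_INHIBIT);

	/* both the mask bit (31:16) and the value bit (15:0) must be set */
	return (ctx_ctrl & m) == m;
}

int main(void)
{
	printf("%d\n", is_restore_inhibit(0x00000001)); /* 0: value bit only */
	printf("%d\n", is_restore_inhibit(0x00010001)); /* 1: mask + value set */
	return 0;
}

When the test succeeds, populate_shadow_context() returns right after copying
the first (ring context state) page, since the hardware will not read the
remaining context pages for a restore-inhibited context.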
 

Comments

Thanks for the patch. I think it's a good optimization. Acked-by Hang Yuan.

On 2018.08.01 08:53:25 +0000, intel-gvt-dev-bounces@lists.freedesktop.org wrote:
> Thanks for the patch. I think it's a good optimization. Acked-by Hang Yuan.
> 

Applied, thanks!