[v2,2/2] drm/i915/gvt: Update time slice more frequently

Submitted by Zhipeng Gong on April 3, 2018, 1:24 a.m.

Details

Message ID 1522718665-4136-3-git-send-email-zhipeng.gong@intel.com
State New
Series "drm/i915/gvt: fix some scheduler issues" ( rev: 2 ) in Intel GVT devel


Commit Message

Zhipeng Gong April 3, 2018, 1:24 a.m.
When there is only one vGPU in GVT-g and it submits workloads
continuously, it is never scheduled out, so vgpu_update_timeslice
is not called and its sched_in_time is not updated for a long time,
which can be several seconds or longer.
Once GVT-g pauses workload submission for this vGPU due to heavy
host CPU load, the vGPU gets scheduled out and vgpu_update_timeslice
is called; its left_ts is then decremented by the large value of
sched_out_time - sched_in_time.
When GVT-g is ready to submit workloads for this vGPU again, it is
not scheduled in until gvt_balance_timeslice reaches stage 0 and
resets its left_ts, which introduces several hundred milliseconds
of latency.

This patch updates the time slice every millisecond so that
sched_in_time stays up to date.
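
To make the failure mode concrete, here is a minimal userspace sketch of
the left_ts arithmetic; the 16 ms slice and the 5 s uninterrupted run are
illustrative assumptions, not values taken from the driver:

#include <stdio.h>

typedef long long ktime_ns;	/* stand-in for the kernel's ktime_t (s64 ns) */

int main(void)
{
	ktime_ns left_ts = 16 * 1000000LL;		/* assumed 16 ms slice */
	ktime_ns sched_in_time = 0;			/* scheduled in at t = 0 */
	ktime_ns sched_out_time = 5000 * 1000000LL;	/* first update after 5 s */

	/* Old behavior: the whole run is charged in one subtraction when
	 * the vGPU is finally scheduled out. */
	left_ts -= sched_out_time - sched_in_time;

	printf("left_ts after a 5 s uninterrupted run: %lld ms\n",
	       left_ts / 1000000);	/* prints -4984 */

	/* The vGPU cannot be scheduled in again until gvt_balance_timeslice
	 * reaches stage 0 and resets left_ts, hence the latency. */
	return 0;
}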

v2: revise commit message

Signed-off-by: Zhipeng Gong <zhipeng.gong@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Min He <min.he@intel.com>
---
 drivers/gpu/drm/i915/gvt/sched_policy.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)


diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index a8b544a..0169e3b 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -53,7 +53,6 @@ struct vgpu_sched_data {
 	bool active;
 
 	ktime_t sched_in_time;
-	ktime_t sched_out_time;
 	ktime_t sched_time;
 	ktime_t left_ts;
 	ktime_t allocated_ts;
@@ -69,15 +68,19 @@ struct gvt_sched_data {
 	ktime_t expire_time;
 };
 
-static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
+static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
 {
 	ktime_t delta_ts;
-	struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;
+	struct vgpu_sched_data *vgpu_data;
 
-	delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
+	if (vgpu == NULL || vgpu == vgpu->gvt->idle_vgpu)
+		return;
 
-	vgpu_data->sched_time += delta_ts;
-	vgpu_data->left_ts -= delta_ts;
+	vgpu_data = vgpu->sched_data;
+	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
+	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
+	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
+	vgpu_data->sched_in_time = cur_time;
 }
 
 #define GVT_TS_BALANCE_PERIOD_MS 100
@@ -151,11 +154,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	}
 
 	cur_time = ktime_get();
-	if (scheduler->current_vgpu) {
-		vgpu_data = scheduler->current_vgpu->sched_data;
-		vgpu_data->sched_out_time = cur_time;
-		vgpu_update_timeslice(scheduler->current_vgpu);
-	}
+	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
 	vgpu_data = scheduler->next_vgpu->sched_data;
 	vgpu_data->sched_in_time = cur_time;
 
@@ -227,12 +226,14 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
 void intel_gvt_schedule(struct intel_gvt *gvt)
 {
 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+	ktime_t cur_time;
 
 	mutex_lock(&gvt->lock);
+	cur_time = ktime_get();
 
 	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
 				(void *)&gvt->service_request)) {
-		if (ktime_get() >= sched_data->expire_time) {
+		if (cur_time >= sched_data->expire_time) {
 			gvt_balance_timeslice(sched_data);
 			sched_data->expire_time = ktime_add_ms(
 				sched_data->expire_time,
@@ -241,6 +242,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
 	}
 	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
 
+	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
 	tbs_sched_func(sched_data);
 
 	mutex_unlock(&gvt->lock);
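
With this patch the same accounting runs on every 1 ms scheduler tick, so
sched_in_time never goes stale. A companion userspace sketch, under the
same illustrative assumptions as above:

#include <stdio.h>

typedef long long ktime_ns;

int main(void)
{
	ktime_ns left_ts = 16 * 1000000LL;	/* same assumed 16 ms slice */
	ktime_ns sched_in_time = 0;
	ktime_ns now;

	/* New behavior: intel_gvt_schedule() charges the running vGPU on
	 * every 1 ms tick and refreshes sched_in_time each time. */
	for (now = 1000000LL; now <= 5000 * 1000000LL; now += 1000000LL) {
		ktime_ns delta = now - sched_in_time;	/* ~1 ms, never 5 s */

		left_ts -= delta;
		sched_in_time = now;
		/* gvt_balance_timeslice can now observe left_ts drifting in
		 * small steps and replenish it each balance period, instead
		 * of discovering one multi-second debt at schedule-out. */
	}

	printf("largest single charge: 1 ms (was ~5000 ms before the patch)\n");
	return 0;
}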

Comments

On 2018.04.03 09:24:25 +0800, Zhipeng Gong wrote:
> When there is only one vGPU in GVT-g and it submits workloads
> continuously, it is never scheduled out, so vgpu_update_timeslice
> is not called and its sched_in_time is not updated for a long time,
> which can be several seconds or longer.
> Once GVT-g pauses workload submission for this vGPU due to heavy
> host CPU load, the vGPU gets scheduled out and vgpu_update_timeslice
> is called; its left_ts is then decremented by the large value of
> sched_out_time - sched_in_time.
> When GVT-g is ready to submit workloads for this vGPU again, it is
> not scheduled in until gvt_balance_timeslice reaches stage 0 and
> resets its left_ts, which introduces several hundred milliseconds
> of latency.
> 
> This patch updates the time slice every millisecond so that
> sched_in_time stays up to date.
> 
> v2: revise commit message
> 
> Signed-off-by: Zhipeng Gong <zhipeng.gong@intel.com>
> Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
> Cc: Min He <min.he@intel.com>
> ---
>  drivers/gpu/drm/i915/gvt/sched_policy.c | 26 ++++++++++++++------------
>  1 file changed, 14 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
> index a8b544a..0169e3b 100644
> --- a/drivers/gpu/drm/i915/gvt/sched_policy.c
> +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
> @@ -53,7 +53,6 @@ struct vgpu_sched_data {
>  	bool active;
>  
>  	ktime_t sched_in_time;
> -	ktime_t sched_out_time;
>  	ktime_t sched_time;
>  	ktime_t left_ts;
>  	ktime_t allocated_ts;
> @@ -69,15 +68,19 @@ struct gvt_sched_data {
>  	ktime_t expire_time;
>  };
>  
> -static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
> +static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
>  {
>  	ktime_t delta_ts;
> -	struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;
> +	struct vgpu_sched_data *vgpu_data;
>  
> -	delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
> +	if (vgpu == NULL || vgpu == vgpu->gvt->idle_vgpu)
> +		return;

Use if (!vgpu || ...)

>  
> -	vgpu_data->sched_time += delta_ts;
> -	vgpu_data->left_ts -= delta_ts;
> +	vgpu_data = vgpu->sched_data;
> +	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
> +	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
> +	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
> +	vgpu_data->sched_in_time = cur_time;
>  }
>  
>  #define GVT_TS_BALANCE_PERIOD_MS 100
> @@ -151,11 +154,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
>  	}
>  
>  	cur_time = ktime_get();
> -	if (scheduler->current_vgpu) {
> -		vgpu_data = scheduler->current_vgpu->sched_data;
> -		vgpu_data->sched_out_time = cur_time;
> -		vgpu_update_timeslice(scheduler->current_vgpu);
> -	}
> +	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
>  	vgpu_data = scheduler->next_vgpu->sched_data;
>  	vgpu_data->sched_in_time = cur_time;

Can simply put ktime_get() into vgpu_update_timeslice(), and above lines can be removed as well.

>  
> @@ -227,12 +226,14 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
>  void intel_gvt_schedule(struct intel_gvt *gvt)
>  {
>  	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
> +	ktime_t cur_time;
>  
>  	mutex_lock(&gvt->lock);
> +	cur_time = ktime_get();
>  
>  	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
>  				(void *)&gvt->service_request)) {
> -		if (ktime_get() >= sched_data->expire_time) {
> +		if (cur_time >= sched_data->expire_time) {
>  			gvt_balance_timeslice(sched_data);
>  			sched_data->expire_time = ktime_add_ms(
>  				sched_data->expire_time,
> @@ -241,6 +242,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
>  	}
>  	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
>  
> +	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
>  	tbs_sched_func(sched_data);
>  
>  	mutex_unlock(&gvt->lock);
> -- 
> 2.7.4
> 
> _______________________________________________
> intel-gvt-dev mailing list
> intel-gvt-dev@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev
> -----Original Message-----
> From: Zhenyu Wang [mailto:zhenyuw@linux.intel.com]
> Sent: Tuesday, April 3, 2018 2:36 PM
> To: Gong, Zhipeng <zhipeng.gong@intel.com>
> Cc: intel-gvt-dev@lists.freedesktop.org; He, Min <min.he@intel.com>
> Subject: Re: [PATCH v2 2/2] drm/i915/gvt: Update time slice more frequently
> 
> On 2018.04.03 09:24:25 +0800, Zhipeng Gong wrote:
> > When there is only one vGPU in GVT-g and it submits workloads
> > continuously, it is never scheduled out, so vgpu_update_timeslice
> > is not called and its sched_in_time is not updated for a long time,
> > which can be several seconds or longer.
> > Once GVT-g pauses workload submission for this vGPU due to heavy
> > host CPU load, the vGPU gets scheduled out and vgpu_update_timeslice
> > is called; its left_ts is then decremented by the large value of
> > sched_out_time - sched_in_time.
> > When GVT-g is ready to submit workloads for this vGPU again, it is
> > not scheduled in until gvt_balance_timeslice reaches stage 0 and
> > resets its left_ts, which introduces several hundred milliseconds
> > of latency.
> >
> > This patch updates the time slice every millisecond so that
> > sched_in_time stays up to date.
> >
> > v2: revise commit message
> >
> > Signed-off-by: Zhipeng Gong <zhipeng.gong@intel.com>
> > Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
> > Cc: Min He <min.he@intel.com>
> > ---
> >  drivers/gpu/drm/i915/gvt/sched_policy.c | 26 ++++++++++++++------------
> >  1 file changed, 14 insertions(+), 12 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
> > index a8b544a..0169e3b 100644
> > --- a/drivers/gpu/drm/i915/gvt/sched_policy.c
> > +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
> > @@ -53,7 +53,6 @@ struct vgpu_sched_data {
> >  	bool active;
> >
> >  	ktime_t sched_in_time;
> > -	ktime_t sched_out_time;
> >  	ktime_t sched_time;
> >  	ktime_t left_ts;
> >  	ktime_t allocated_ts;
> > @@ -69,15 +68,19 @@ struct gvt_sched_data {
> >  	ktime_t expire_time;
> >  };
> >
> > -static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
> > +static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
> >  {
> >  	ktime_t delta_ts;
> > -	struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;
> > +	struct vgpu_sched_data *vgpu_data;
> >
> > -	delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
> > +	if (vgpu == NULL || vgpu == vgpu->gvt->idle_vgpu)
> > +		return;
> 
> Use if (!vgpu || ...)

ok

> >
> > -	vgpu_data->sched_time += delta_ts;
> > -	vgpu_data->left_ts -= delta_ts;
> > +	vgpu_data = vgpu->sched_data;
> > +	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
> > +	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
> > +	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
> > +	vgpu_data->sched_in_time = cur_time;
> >  }
> >
> >  #define GVT_TS_BALANCE_PERIOD_MS 100
> > @@ -151,11 +154,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
> >  	}
> >
> >  	cur_time = ktime_get();
> > -	if (scheduler->current_vgpu) {
> > -		vgpu_data = scheduler->current_vgpu->sched_data;
> > -		vgpu_data->sched_out_time = cur_time;
> > -		vgpu_update_timeslice(scheduler->current_vgpu);
> > -	}
> > +	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
> >  	vgpu_data = scheduler->next_vgpu->sched_data;
> >  	vgpu_data->sched_in_time = cur_time;
> 
> Can simply put ktime_get() into vgpu_update_timeslice(), and above lines can be
> removed as well.
> 

cur_time is still needed to update the next vGPU's vgpu_data->sched_in_time,
so it cannot be removed.
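
To spell that out, a sketch (not part of the patch) of what
try_to_schedule_next_vgpu() would look like if ktime_get() were moved
inside the helper:

	/* hypothetical variant with ktime_get() inside the helper */
	vgpu_update_timeslice(scheduler->current_vgpu);	/* clock read #1 */

	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = ktime_get();		/* clock read #2 */

	/* The two reads can differ, so a small window would be charged to
	 * neither vGPU; passing one cur_time into both uses keeps the
	 * outgoing and incoming timestamps identical. */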

> >
> > @@ -227,12 +226,14 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
> >  void intel_gvt_schedule(struct intel_gvt *gvt)
> >  {
> >  	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
> > +	ktime_t cur_time;
> >
> >  	mutex_lock(&gvt->lock);
> > +	cur_time = ktime_get();
> >
> >  	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
> >  				(void *)&gvt->service_request)) {
> > -		if (ktime_get() >= sched_data->expire_time) {
> > +		if (cur_time >= sched_data->expire_time) {
> >  			gvt_balance_timeslice(sched_data);
> >  			sched_data->expire_time = ktime_add_ms(
> >  				sched_data->expire_time,
> > @@ -241,6 +242,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
> >  	}
> >  	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
> >
> > +	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
> >  	tbs_sched_func(sched_data);
> >
> >  	mutex_unlock(&gvt->lock);
> > --
> > 2.7.4
> >
> > _______________________________________________
> > intel-gvt-dev mailing list
> > intel-gvt-dev@lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev
> 
> --
> Open Source Technology Center, Intel ltd.
> 
> $gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827