[RFC,03/29] drm/i915: Introduce GVT context creation API

Submitted by Wang, Zhi A on Jan. 28, 2016, 10:21 a.m.

Details

Message ID 1453976511-27322-4-git-send-email-zhi.a.wang@intel.com
State New
Headers show
Series "iGVT-g implementation in i915" ( rev: 1 ) in Intel GFX

Not browsing as part of any series.

Commit Message

Wang, Zhi A Jan. 28, 2016, 10:21 a.m.
GVT workload scheduler needs special host LRC contexts, the so called "shadow
LRC context" to submit guest workload to host i915. During the guest
workload submission, GVT fills the shadow LRC context with the content of
guest LRC context: engine context is copied without changes, ring context is
mostly owned by host i915, except the PPGTT root pointers, which will be
filled with the shadow PPGTT page table root pointers managed by GVT-g.

The GVT-g workload scheduler flow:

         +-----------+                   +-----------+
         | GVT Guest |                   | GVT Guest |
         +-+-----^---+                   +-+-----^---+
           |     |                         |     |
           |     | GVT-g                   |     | GVT-g
vELSP write|     | emulates     vELSP write|     | emulates
           |     | Execlist/CSB            |     | Execlist/CSB
           |     | Status                  |     | Status
           |     |                         |     |
    +------v-----+-------------------------v-----+---------+
    |           GVT Virtual Execlist Submission            |
    +------+-------------------------------+---------------+
           |                               |
           | Per-VM/Ring Workload Q         | Per-VM/Ring Workload Q
   +---------------------+--+      +------------------------+
       +---v--------+    ^             +---v--------+
       |GVT Workload|... |             |GVT Workload|...
       +------------+    |             +------------+
                         |
                         | Pick Workload from Q
    +--------------------+---------------------------------+
    |                GVT Workload Scheduler                |
    +--------------------+---------------------------------+
                         |         * Shadow guest LRC context
                  +------v------+  * Shadow guest ring buffer
                  | GVT Context |  * Scan/Patch guest RB instructions
                  +------+------+
                         |
                         v
              Host i915 GEM Submission

Guest ELSP submission will be wrapped into a GVT workload data structure.
When a guest is scheduled, the workload scheduler picks the GVT workload from
the per-VM/ring Q, then prepares to dispatch it through the host i915 GEM
submission interface.

The GVT workload lifecycle:

- Workload scheduler populates the GVT LRC context with the content of
guest LRC context
- Workload scheduler populates the GVT ring buffer with the instructions
from guest ring buffer
- Workload scheduler populates the PDPs in the GVT LRC context with shadow
PPGTT PDPs
- Workload scheduler submits this context through i915 GEM submission
interface
- Once the i915 gem request is finished, GVT-g waits until the GVT LRC context
is idle.
- i915 LRC routines notify the workload scheduler that the LRC context is idle.
- Workload scheduler updates the guest LRC context with the content of GVT LRC
context
- Emulate CSB and context switch interrupt to guest

Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         | 20 +++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_context.c | 38 +++++++++++++++++++++++++++++++++
 2 files changed, 58 insertions(+)

Patch hide | download patch | download mbox

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index db3c79b..fc5ddee 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -890,6 +890,24 @@  struct intel_context {
 	} engine[I915_NUM_RINGS];
 
 	struct list_head link;
+
+	/* Is a GVT context ? */
+	bool gvt_context;
+	/* Used by GVT workload scheduler. */
+	void *gvt_context_private_data[I915_NUM_RINGS];
+	/*
+	 * As GVT contexts may come from different guests,
+	 * the addressing mode may differ
+	 */
+	u32 gvt_context_addressing_mode[I915_NUM_RINGS];
+	/*
+	 * Called when GVT context is scheduled-in
+	 */
+	void (*gvt_context_schedule_in)(void *data);
+	/*
+	 * Called when GVT context is scheduled-out
+	 */
+	void (*gvt_context_schedule_out)(void *data);
 };
 
 enum fb_op_origin {
@@ -2866,6 +2884,8 @@  struct drm_i915_gem_object *i915_gem_object_create_from_data(
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
+struct intel_context * i915_gem_create_gvt_context(struct drm_device *dev);
+
 /* Flags used by pin/bind&friends. */
 #define PIN_MAPPABLE	(1<<0)
 #define PIN_NONBLOCK	(1<<1)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 6a4f64b..410540a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -260,6 +260,44 @@  err_out:
 	return ERR_PTR(ret);
 }
 
+/**
+ * i915_gem_create_gvt_context - create a host LRC context for GVT-g
+ * @dev: drm device
+ *
+ * Allocates a kernel-owned intel_context (no file_priv, no user handle)
+ * used by the GVT workload scheduler as a shadow LRC context, and eagerly
+ * allocates the per-ring LRC state for every engine.
+ *
+ * Return: the new context on success, or an ERR_PTR() on failure. Never
+ * returns NULL.
+ */
+struct intel_context *
+i915_gem_create_gvt_context(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_context *ctx;
+	int ret;
+	int i;
+
+	/*
+	 * Allocate before taking struct_mutex: kzalloc needs no locking,
+	 * and returning on allocation failure must not leak the mutex.
+	 */
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (ctx == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_lock(&dev->struct_mutex);
+
+	kref_init(&ctx->ref);
+	list_add_tail(&ctx->link, &dev_priv->context_list);
+
+	/* Kernel-internal context: no owning file, no userspace handle. */
+	ctx->i915 = dev_priv;
+	ctx->file_priv = NULL;
+	ctx->user_handle = -1;
+	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
+	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
+
+	ctx->gvt_context = true;
+
+	/* Eagerly back every engine with LRC state for shadow submission. */
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		ret = intel_lr_context_deferred_alloc(ctx, &dev_priv->ring[i]);
+		if (ret) {
+			/* Drop our reference; unreference unlinks and frees. */
+			i915_gem_context_unreference(ctx);
+			/* Keep the ERR_PTR convention; never return NULL. */
+			ctx = ERR_PTR(ret);
+			goto out;
+		}
+	}
+out:
+	mutex_unlock(&dev->struct_mutex);
+	return ctx;
+}
+
 /**
  * The default context needs to exist per ring that uses contexts. It stores the
  * context state of the GPU for applications that don't utilize HW contexts, as