[1/3] drm/i915: Clearing buffer objects via blitter engine

Submitted by ankitprasad.r.sharma@intel.com on April 11, 2015, 12:23 p.m.

Details

Message ID 1428755032-20605-2-git-send-email-ankitprasad.r.sharma@intel.com
State New
Headers show

Not browsing as part of any series.

Commit Message

ankitprasad.r.sharma@intel.com April 11, 2015, 12:23 p.m.
From: Ankitprasad Sharma <ankitprasad.r.sharma@intel.com>

This patch adds support for clearing buffer objects via the blitter
engine. This is particularly useful for clearing out the memory
from the stolen region.

v2: Add support for using execlists & PPGTT

testcase: igt/gem_create_stolen

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Deepak S <deepak.s@linux.intel.com>
Signed-off-by: Ankitprasad Sharma <ankitprasad.r.sharma@intel.com>
---
 drivers/gpu/drm/i915/Makefile        |   1 +
 drivers/gpu/drm/i915/i915_drv.h      |   4 +
 drivers/gpu/drm/i915/i915_gem_exec.c | 197 +++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_lrc.c     |   2 +-
 drivers/gpu/drm/i915/intel_lrc.h     |   2 +
 5 files changed, 205 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/i915/i915_gem_exec.c

Patch hide | download patch | download mbox

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a69002e..711a87d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -25,6 +25,7 @@  i915-y += i915_cmd_parser.o \
 	  i915_gem_debug.o \
 	  i915_gem_dmabuf.o \
 	  i915_gem_evict.o \
+	  i915_gem_exec.o \
 	  i915_gem_execbuffer.o \
 	  i915_gem_gtt.o \
 	  i915_gem.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index eb38cd1..21a2b1f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2927,6 +2927,10 @@  int __must_check i915_gem_evict_something(struct drm_device *dev,
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
+/* i915_gem_exec.c */
+int i915_gem_exec_clear_object(struct drm_i915_gem_object *obj,
+			       struct drm_i915_file_private *file_priv);
+
 /* belongs in i915_gem_gtt.h */
 static inline void i915_gem_chipset_flush(struct drm_device *dev)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_exec.c b/drivers/gpu/drm/i915/i915_gem_exec.c
new file mode 100644
index 0000000..8acc28c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_exec.c
@@ -0,0 +1,197 @@ 
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Chris Wilson <chris at chris-wilson.co.uk>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#define GEN8_COLOR_BLT_CMD (2<<29 | 0x50<<22)
+
+#define BPP_8 0
+#define BPP_16 (1<<24)
+#define BPP_32 (1<<25 | 1<<24)
+
+#define ROP_FILL_COPY (0xf0 << 16)
+
+/*
+ * Prepare @obj for a GPU write: serialise with outstanding rendering,
+ * flush any dirty CPU/GTT write domains, then invalidate the ring's
+ * caches so the engine observes coherent data.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int i915_gem_exec_flush_object(struct drm_i915_gem_object *obj,
+				      struct intel_engine_cs *ring,
+				      struct intel_context *ctx)
+{
+	int ret;
+	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+
+	/* Wait for (or queue a sync against) rendering on other rings. */
+	ret = i915_gem_object_sync(obj, ring);
+	if (ret)
+		return ret;
+
+	/* Dirty CPU cachelines must reach memory before the GPU reads. */
+	if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) {
+		if (i915_gem_clflush_object(obj, false))
+			i915_gem_chipset_flush(obj->base.dev);
+		obj->base.write_domain &= ~I915_GEM_DOMAIN_CPU;
+	}
+	/* Order any pending GTT writes before the GPU touches the pages. */
+	if (obj->base.write_domain & I915_GEM_DOMAIN_GTT) {
+		wmb();
+		obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+	}
+
+
+	/* Invalidate via the submission mechanism actually in use. */
+	return i915.enable_execlists ?
+			logical_ring_invalidate_all_caches(ringbuf, ctx) :
+			intel_ring_invalidate_all_caches(ring);
+}
+
+/*
+ * Record that @obj has just been written by the GPU on @ring: attach the
+ * current request as the last write, move the vma onto the active list
+ * and mark the backing pages dirty so they are written back on eviction.
+ */
+static void i915_gem_exec_dirty_object(struct drm_i915_gem_object *obj,
+				       struct intel_engine_cs *ring,
+				       struct i915_address_space *vm)
+{
+	struct drm_i915_gem_request *req;
+	req = intel_ring_get_request(ring);
+
+	i915_gem_request_assign(&obj->last_write_req, req);
+	/*
+	 * NOTE(review): domains are set to RENDER even though the write is
+	 * normally performed on the blitter ring — confirm this is intended.
+	 */
+	obj->base.read_domains = I915_GEM_DOMAIN_RENDER;
+	obj->base.write_domain = I915_GEM_DOMAIN_RENDER;
+	i915_vma_move_to_active(i915_gem_obj_to_vma(obj, vm), ring);
+	obj->dirty = 1;
+
+	/* Force a flush to be emitted before the next request is added. */
+	ring->gpu_caches_dirty = true;
+}
+
+/**
+ * i915_gem_exec_clear_object() - zero a buffer object's contents via the GPU
+ * @obj: object to clear
+ * @file_priv: client whose default context is used for the submission
+ *
+ * Fills every page of @obj with zeroes by emitting a colour-fill blit
+ * (one PAGE_SIZE-wide row per page) on the blitter ring, or the render
+ * ring on hardware without a blitter, using either legacy ringbuffer or
+ * execlists submission.  Particularly useful for scrubbing objects
+ * allocated from the stolen region.
+ *
+ * Must be called with dev->struct_mutex held.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int i915_gem_exec_clear_object(struct drm_i915_gem_object *obj,
+			       struct drm_i915_file_private *file_priv)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	struct intel_context *ctx;
+	struct intel_ringbuffer *ringbuf;
+	struct i915_address_space *vm;
+	int ret = 0;
+
+	lockdep_assert_held(&dev->struct_mutex);
+
+	/* Prefer the dedicated blitter; old hw must use the render ring. */
+	ring = &dev_priv->ring[HAS_BLT(dev) ? BCS : RCS];
+
+	/* The context lookup may fail; check before dereferencing. */
+	ctx = i915_gem_context_get(file_priv, DEFAULT_CONTEXT_HANDLE);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	if (ctx->ppgtt)
+		vm = &ctx->ppgtt->base;
+	else
+		vm = &dev_priv->gtt.base;
+
+	/*
+	 * The logical ring context is allocated lazily, and only exists
+	 * (or is needed) when execlists submission is in use.
+	 */
+	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
+		ret = intel_lr_context_deferred_create(ctx, ring);
+		if (ret)
+			return ret;
+	}
+
+	ringbuf = ctx->engine[ring->id].ringbuf;
+
+	ret = i915_gem_object_pin(obj, vm, PAGE_SIZE, 0);
+	if (ret)
+		return ret;
+
+	/* Gen2/3 cannot blit reliably through a fence; drop it first. */
+	if (obj->tiling_mode && INTEL_INFO(dev)->gen <= 3) {
+		ret = i915_gem_object_put_fence(obj);
+		if (ret)
+			goto unpin;
+	}
+
+	ret = i915_gem_exec_flush_object(obj, ring, ctx);
+	if (ret)
+		goto unpin;
+
+	if (i915.enable_execlists) {
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ret = intel_logical_ring_begin(ringbuf, ctx, 8);
+			if (ret)
+				goto unpin;
+
+			/* Gen8+ COLOR_BLT: 7 dwords + NOOP padding. */
+			intel_logical_ring_emit(ringbuf, GEN8_COLOR_BLT_CMD |
+							 BLT_WRITE_RGBA |
+							 (7-2));
+			intel_logical_ring_emit(ringbuf, BPP_32 |
+							 ROP_FILL_COPY |
+							 PAGE_SIZE);
+			intel_logical_ring_emit(ringbuf, 0);
+			/* height = pages, width = PAGE_SIZE/4 32bpp pixels */
+			intel_logical_ring_emit(ringbuf,
+						obj->base.size >> PAGE_SHIFT
+						<< 16 | PAGE_SIZE / 4);
+			intel_logical_ring_emit(ringbuf,
+						i915_gem_obj_offset(obj, vm));
+			intel_logical_ring_emit(ringbuf, 0);
+			intel_logical_ring_emit(ringbuf, 0);
+			intel_logical_ring_emit(ringbuf, MI_NOOP);
+
+			intel_logical_ring_advance(ringbuf);
+		} else {
+			DRM_ERROR("Execlists not supported for gen %d\n",
+				  INTEL_INFO(dev)->gen);
+			ret = -EINVAL;
+			goto unpin;
+		}
+	} else {
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ret = intel_ring_begin(ring, 8);
+			if (ret)
+				goto unpin;
+
+			intel_ring_emit(ring, GEN8_COLOR_BLT_CMD |
+					      BLT_WRITE_RGBA | (7-2));
+			intel_ring_emit(ring, BPP_32 |
+					      ROP_FILL_COPY | PAGE_SIZE);
+			intel_ring_emit(ring, 0);
+			intel_ring_emit(ring,
+					obj->base.size >> PAGE_SHIFT << 16 |
+					PAGE_SIZE / 4);
+			intel_ring_emit(ring, i915_gem_obj_offset(obj, vm));
+			intel_ring_emit(ring, 0);
+			intel_ring_emit(ring, 0);
+			intel_ring_emit(ring, MI_NOOP);
+		} else {
+			/* Pre-gen8 COLOR_BLT uses a shorter, 6-dword form. */
+			ret = intel_ring_begin(ring, 6);
+			if (ret)
+				goto unpin;
+
+			intel_ring_emit(ring, COLOR_BLT_CMD |
+					      BLT_WRITE_RGBA);
+			intel_ring_emit(ring, BPP_32 |
+					      ROP_FILL_COPY | PAGE_SIZE);
+			intel_ring_emit(ring,
+					obj->base.size >> PAGE_SHIFT << 16 |
+					PAGE_SIZE);
+			intel_ring_emit(ring, i915_gem_obj_offset(obj, vm));
+			intel_ring_emit(ring, 0);
+			intel_ring_emit(ring, MI_NOOP);
+		}
+
+		__intel_ring_advance(ring);
+	}
+
+	/* Success path also falls through to drop the pin reference. */
+	i915_gem_exec_dirty_object(obj, ring, vm);
+
+unpin:
+	/*
+	 * Drop the pin by hand: @vm may be a ppgtt, so the ggtt unpin
+	 * helper cannot be used here.
+	 */
+	i915_gem_obj_to_vma(obj, vm)->pin_count--;
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index fcb074b..5481638 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -559,7 +559,7 @@  static int execlists_context_queue(struct intel_engine_cs *ring,
 	return 0;
 }
 
-static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
+int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
 					      struct intel_context *ctx)
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index adb731e4..80a873b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -42,6 +42,8 @@  int intel_logical_rings_init(struct drm_device *dev);
 
 int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
 				  struct intel_context *ctx);
+int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
+				       struct intel_context *ctx);
 /**
  * intel_logical_ring_advance() - advance the ringbuffer tail
  * @ringbuf: Ringbuffer to advance.