[1/5] drm/i915: Allow i915 to manage the vma offset nodes instead of drm core

Submitted by Abdiel Janulgue on Aug. 26, 2019, 12:20 p.m.

Details

Message ID 20190826122102.32400-1-abdiel.janulgue@linux.intel.com
State New
Series "Series without cover letter" ( rev: 1 ) in Intel GFX


Commit Message

Abdiel Janulgue Aug. 26, 2019, 12:20 p.m.
Have i915 replace the core drm_gem_mmap implementation to overcome its
limitation of having only a single mmap offset node per gem object.
The change allows us to have multiple mmap offsets per object, which
lets each mmapping instance use a unique fault handler per user vma.

This enables us to store extra data within vma->vm_private_data and to
assign the pagefault ops for each mmap instance, allowing objects to use
different fault handlers depending on their backing storage.
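
For reference, the userspace flow this preserves: ask the kernel for a
fake offset via the mmap_gtt ioctl, then mmap() the drm fd at that
offset. A minimal sketch (the helper name is illustrative; error
handling is trimmed):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/i915_drm.h>

    static void *map_bo_gtt(int fd, uint32_t handle, size_t size)
    {
            struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

            /* With this patch, each call can mint a fresh offset node */
            if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                    return MAP_FAILED;

            return mmap(NULL, size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, arg.offset);
    }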

Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      | 193 ++++++++++++++++--
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |  19 ++
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |   7 +-
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  18 ++
 .../drm/i915/gem/selftests/i915_gem_mman.c    |  12 +-
 drivers/gpu/drm/i915/gt/intel_reset.c         |  11 +-
 drivers/gpu/drm/i915/i915_drv.c               |   9 +-
 drivers/gpu/drm/i915/i915_drv.h               |   1 +
 drivers/gpu/drm/i915/i915_vma.c               |  20 +-
 9 files changed, 254 insertions(+), 36 deletions(-)


diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 595539a09e38..fb7e39f115d7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -218,7 +218,8 @@  vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
 	struct vm_area_struct *area = vmf->vma;
-	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
+	struct i915_mmap_offset *priv = area->vm_private_data;
+	struct drm_i915_gem_object *obj = priv->obj;
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
@@ -372,13 +373,20 @@  vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
+	struct i915_mmap_offset *mmo;
 
 	GEM_BUG_ON(!obj->userfault_count);
 
 	obj->userfault_count = 0;
 	list_del(&obj->userfault_link);
-	drm_vma_node_unmap(&obj->base.vma_node,
-			   obj->base.dev->anon_inode->i_mapping);
+
+	mutex_lock(&obj->mmo_lock);
+	list_for_each_entry(mmo, &obj->mmap_offsets, offset) {
+		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
+			drm_vma_node_unmap(&mmo->vma_node,
+					   obj->base.dev->anon_inode->i_mapping);
+	}
+	mutex_unlock(&obj->mmo_lock);
 
 	for_each_ggtt_vma(vma, obj)
 		i915_vma_unset_userfault(vma);
@@ -433,14 +441,33 @@  void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
-static int create_mmap_offset(struct drm_i915_gem_object *obj)
+static void init_mmap_offset(struct drm_i915_gem_object *obj,
+			     struct i915_mmap_offset *mmo)
+{
+	mutex_lock(&obj->mmo_lock);
+	kref_init(&mmo->ref);
+	list_add(&mmo->offset, &obj->mmap_offsets);
+	mutex_unlock(&obj->mmo_lock);
+
+	mmo->obj = obj;
+}
+
+static int create_mmap_offset(struct drm_i915_gem_object *obj,
+			      struct i915_mmap_offset *mmo)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct drm_device *dev = obj->base.dev;
 	int err;
 
-	err = drm_gem_create_mmap_offset(&obj->base);
-	if (likely(!err))
+	drm_vma_node_reset(&mmo->vma_node);
+	if (mmo->file)
+		drm_vma_node_allow(&mmo->vma_node, mmo->file);
+	err = drm_vma_offset_add(dev->vma_offset_manager, &mmo->vma_node,
+				 obj->base.size / PAGE_SIZE);
+	if (likely(!err)) {
+		init_mmap_offset(obj, mmo);
 		return 0;
+	}
 
 	/* Attempt to reap some mmap space from dead objects */
 	do {
@@ -451,32 +478,48 @@  static int create_mmap_offset(struct drm_i915_gem_object *obj)
 			break;
 
 		i915_gem_drain_freed_objects(i915);
-		err = drm_gem_create_mmap_offset(&obj->base);
-		if (!err)
+		err = drm_vma_offset_add(dev->vma_offset_manager, &mmo->vma_node,
+					 obj->base.size / PAGE_SIZE);
+		if (!err) {
+			init_mmap_offset(obj, mmo);
 			break;
+		}
 
 	} while (flush_delayed_work(&i915->gem.retire_work));
 
 	return err;
 }
 
-int
-i915_gem_mmap_gtt(struct drm_file *file,
-		  struct drm_device *dev,
-		  u32 handle,
-		  u64 *offset)
+static int
+__assign_gem_object_mmap_data(struct drm_file *file,
+			      u32 handle,
+			      enum i915_mmap_type mmap_type,
+			      u64 *offset)
 {
 	struct drm_i915_gem_object *obj;
+	struct i915_mmap_offset *mmo;
 	int ret;
 
 	obj = i915_gem_object_lookup(file, handle);
 	if (!obj)
 		return -ENOENT;
 
-	ret = create_mmap_offset(obj);
-	if (ret == 0)
-		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+	mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);
+	if (!mmo) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	mmo->file = file;
+	ret = create_mmap_offset(obj, mmo);
+	if (ret) {
+		kfree(mmo);
+		goto err;
+	}
 
+	mmo->mmap_type = mmap_type;
+	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
+err:
 	i915_gem_object_put(obj);
 	return ret;
 }
@@ -500,9 +543,123 @@  int
 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file)
 {
-	struct drm_i915_gem_mmap_gtt *args = data;
+	struct drm_i915_gem_mmap_offset *args = data;
+
+	return __assign_gem_object_mmap_data(file, args->handle,
+					     I915_MMAP_TYPE_GTT,
+					     &args->offset);
+}
+
+void i915_mmap_offset_object_release(struct kref *ref)
+{
+	struct i915_mmap_offset *mmo = container_of(ref,
+						    struct i915_mmap_offset,
+						    ref);
+	struct drm_i915_gem_object *obj = mmo->obj;
+	struct drm_device *dev = obj->base.dev;
+
+	lockdep_assert_held(&obj->mmo_lock);
+
+	if (mmo->file)
+		drm_vma_node_revoke(&mmo->vma_node, mmo->file);
+	drm_vma_offset_remove(dev->vma_offset_manager, &mmo->vma_node);
+	list_del(&mmo->offset);
 
-	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+	kfree(mmo);
+}
+
+static void i915_gem_vm_open(struct vm_area_struct *vma)
+{
+	struct i915_mmap_offset *priv = vma->vm_private_data;
+	struct drm_i915_gem_object *obj = priv->obj;
+
+	i915_gem_object_get(obj);
+	kref_get(&priv->ref);
+}
+
+static void i915_gem_vm_close(struct vm_area_struct *vma)
+{
+	struct i915_mmap_offset *priv = vma->vm_private_data;
+	struct drm_i915_gem_object *obj = priv->obj;
+
+	mutex_lock(&obj->mmo_lock);
+	kref_put(&priv->ref, i915_mmap_offset_object_release);
+	mutex_unlock(&obj->mmo_lock);
+
+	i915_gem_object_put(obj);
+}
+
+static const struct vm_operations_struct i915_gem_gtt_vm_ops = {
+	.fault = i915_gem_fault,
+	.open = i915_gem_vm_open,
+	.close = i915_gem_vm_close,
+};
+
+/* This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data. Since we need to
+ * be able to resolve multiple mmap offsets which could be tied
+ * to a single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_vma_offset_node *node;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct i915_mmap_offset *mmo = NULL;
+	struct drm_gem_object *obj = NULL;
+
+	if (drm_dev_is_unplugged(dev))
+		return -ENODEV;
+
+	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+						  vma->vm_pgoff,
+						  vma_pages(vma));
+	if (likely(node)) {
+		mmo = container_of(node, struct i915_mmap_offset,
+				   vma_node);
+		/*
+		 * Skip 0-refcnted objects as it is in the process of being
+		 * destroyed and will be invalid when the vma manager lock
+		 * is released.
+		 */
+		obj = &mmo->obj->base;
+		if (!kref_get_unless_zero(&obj->refcount))
+			obj = NULL;
+	}
+	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+
+	if (!obj)
+		return -EINVAL;
+
+	if (!drm_vma_node_is_allowed(node, priv)) {
+		drm_gem_object_put_unlocked(obj);
+		return -EACCES;
+	}
+
+	if (to_intel_bo(obj)->readonly) {
+		if (vma->vm_flags & VM_WRITE) {
+			drm_gem_object_put_unlocked(obj);
+			return -EINVAL;
+		}
+
+		vma->vm_flags &= ~VM_MAYWRITE;
+	}
+
+	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+	vma->vm_private_data = mmo;
+
+	vma->vm_ops = &i915_gem_gtt_vm_ops;
+
+	/*
+	 * Take a ref for our mmap_offset object. The reference is cleaned
+	 * up when the vma is closed.
+	 */
+	kref_get(&mmo->ref);
+
+	return 0;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index d7855dc5a5c5..eb3d7f3528f3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -58,6 +58,9 @@  void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
 	INIT_LIST_HEAD(&obj->lut_list);
 
+	mutex_init(&obj->mmo_lock);
+	INIT_LIST_HEAD(&obj->mmap_offsets);
+
 	init_rcu_head(&obj->rcu);
 
 	obj->ops = ops;
@@ -94,6 +97,7 @@  void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 	struct drm_i915_gem_object *obj = to_intel_bo(gem);
 	struct drm_i915_file_private *fpriv = file->driver_priv;
 	struct i915_lut_handle *lut, *ln;
+	struct i915_mmap_offset *mmo, *on;
 	LIST_HEAD(close);
 
 	i915_gem_object_lock(obj);
@@ -108,6 +112,13 @@  void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 	}
 	i915_gem_object_unlock(obj);
 
+	mutex_lock(&obj->mmo_lock);
+	list_for_each_entry_safe(mmo, on, &obj->mmap_offsets, offset) {
+		if (mmo->file == file)
+			kref_put(&mmo->ref, i915_mmap_offset_object_release);
+	}
+	mutex_unlock(&obj->mmo_lock);
+
 	list_for_each_entry_safe(lut, ln, &close, obj_link) {
 		struct i915_gem_context *ctx = lut->ctx;
 		struct i915_vma *vma;
@@ -156,6 +167,7 @@  static void __i915_gem_free_objects(struct drm_i915_private *i915,
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	llist_for_each_entry_safe(obj, on, freed, freed) {
 		struct i915_vma *vma, *vn;
+		struct i915_mmap_offset *mmo, *on;
 
 		trace_i915_gem_object_destroy(obj);
 
@@ -169,6 +181,13 @@  static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		GEM_BUG_ON(!list_empty(&obj->vma.list));
 		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
 
+		i915_gem_object_release_mmap(obj);
+
+		mutex_lock(&obj->mmo_lock);
+		list_for_each_entry_safe(mmo, on, &obj->mmap_offsets, offset)
+			kref_put(&mmo->ref, i915_mmap_offset_object_release);
+		mutex_unlock(&obj->mmo_lock);
+
 		mutex_unlock(&i915->drm.struct_mutex);
 
 		GEM_BUG_ON(atomic_read(&obj->bind_count));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 5efb9936e05b..d667ed8bb0a6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -125,13 +125,13 @@  void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
 static inline void
 i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
 {
-	obj->base.vma_node.readonly = true;
+	obj->readonly = true;
 }
 
 static inline bool
 i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
 {
-	return obj->base.vma_node.readonly;
+	return obj->readonly;
 }
 
 static inline bool
@@ -423,6 +423,9 @@  int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
 				  const struct i915_sched_attr *attr);
+
+void i915_mmap_offset_object_release(struct kref *ref);
+
 #define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
 
 #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index ede0eb4218a8..d74ddb479318 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -60,6 +60,19 @@  struct drm_i915_gem_object_ops {
 	void (*release)(struct drm_i915_gem_object *obj);
 };
 
+enum i915_mmap_type {
+	I915_MMAP_TYPE_GTT = 0,
+};
+
+struct i915_mmap_offset {
+	struct drm_vma_offset_node vma_node;
+	struct drm_i915_gem_object *obj;
+	struct drm_file *file;
+	enum i915_mmap_type mmap_type;
+	struct kref ref;
+	struct list_head offset;
+};
+
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
 
@@ -115,6 +128,11 @@  struct drm_i915_gem_object {
 	unsigned int userfault_count;
 	struct list_head userfault_link;
 
+	/* Protects access to mmap offsets */
+	struct mutex mmo_lock;
+	struct list_head mmap_offsets;
+	bool readonly:1;
+
 	I915_SELFTEST_DECLARE(struct list_head st_link);
 
 	/*
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 1d27babff0ce..a7513421b1db 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -371,15 +371,20 @@  static bool assert_mmap_offset(struct drm_i915_private *i915,
 			       int expected)
 {
 	struct drm_i915_gem_object *obj;
+	/* refcounted in create_mmap_offset */
+	struct i915_mmap_offset *mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);
 	int err;
 
 	obj = i915_gem_object_create_internal(i915, size);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	err = create_mmap_offset(obj);
+	err = create_mmap_offset(obj, mmo);
+	if (err)
+		kfree(mmo);
 	i915_gem_object_put(obj);
 
+
 	return err == expected;
 }
 
@@ -422,6 +427,8 @@  static int igt_mmap_offset_exhaustion(void *arg)
 	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node resv, *hole;
+	/* refcounted in create_mmap_offset */
+	struct i915_mmap_offset *mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);
 	u64 hole_start, hole_end;
 	int loop, err;
 
@@ -465,9 +472,10 @@  static int igt_mmap_offset_exhaustion(void *arg)
 		goto out;
 	}
 
-	err = create_mmap_offset(obj);
+	err = create_mmap_offset(obj, mmo);
 	if (err) {
 		pr_err("Unable to insert object into reclaimed hole\n");
+		kfree(mmo);
 		goto err_obj;
 	}
 
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index b9d84d52e986..7b82ef888b96 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -628,6 +628,7 @@  static void revoke_mmaps(struct intel_gt *gt)
 
 	for (i = 0; i < gt->ggtt->num_fences; i++) {
 		struct drm_vma_offset_node *node;
+		struct i915_mmap_offset *mmo;
 		struct i915_vma *vma;
 		u64 vma_offset;
 
@@ -641,10 +642,18 @@  static void revoke_mmaps(struct intel_gt *gt)
 		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
 		node = &vma->obj->base.vma_node;
 		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
-		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
+
+		list_for_each_entry(mmo, &vma->obj->mmap_offsets, offset) {
+			node = &mmo->vma_node;
+			if (!drm_mm_node_allocated(&node->vm_node) ||
+			    mmo->mmap_type != I915_MMAP_TYPE_GTT)
+				continue;
+
+			unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
 				    drm_vma_node_offset_addr(node) + vma_offset,
 				    vma->size,
 				    1);
+		}
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1c4576a4a5e9..bdd597200984 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2722,18 +2722,12 @@  const struct dev_pm_ops i915_pm_ops = {
 	.runtime_resume = intel_runtime_resume,
 };
 
-static const struct vm_operations_struct i915_gem_vm_ops = {
-	.fault = i915_gem_fault,
-	.open = drm_gem_vm_open,
-	.close = drm_gem_vm_close,
-};
-
 static const struct file_operations i915_driver_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
-	.mmap = drm_gem_mmap,
+	.mmap = i915_gem_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
 	.compat_ioctl = i915_compat_ioctl,
@@ -2822,7 +2816,6 @@  static struct drm_driver driver = {
 
 	.gem_close_object = i915_gem_close_object,
 	.gem_free_object_unlocked = i915_gem_free_object,
-	.gem_vm_ops = &i915_gem_vm_ops,
 
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b42651a387d9..bf2927e88e14 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2327,6 +2327,7 @@  int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
 void i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 vm_fault_t i915_gem_fault(struct vm_fault *vmf);
 
 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e0e677b2a3a9..677323644319 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -861,7 +861,8 @@  static void __i915_vma_iounmap(struct i915_vma *vma)
 
 void i915_vma_revoke_mmap(struct i915_vma *vma)
 {
-	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+	struct drm_vma_offset_node *node;
+	struct i915_mmap_offset *mmo;
 	u64 vma_offset;
 
 	lockdep_assert_held(&vma->vm->mutex);
@@ -873,10 +874,19 @@  void i915_vma_revoke_mmap(struct i915_vma *vma)
 	GEM_BUG_ON(!vma->obj->userfault_count);
 
 	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
-	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
-			    drm_vma_node_offset_addr(node) + vma_offset,
-			    vma->size,
-			    1);
+
+	list_for_each_entry(mmo, &vma->obj->mmap_offsets, offset) {
+		node = &mmo->vma_node;
+		/* Only gtt-mmaps for this vma should be unmapped */
+		if (!drm_mm_node_allocated(&node->vm_node) ||
+		    mmo->mmap_type != I915_MMAP_TYPE_GTT)
+			continue;
+
+		unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
+				    drm_vma_node_offset_addr(node) + vma_offset,
+				    vma->size,
+				    1);
+	}
 
 	i915_vma_unset_userfault(vma);
 	if (!--vma->obj->userfault_count)

Comments

Quoting Abdiel Janulgue (2019-08-26 13:20:58)
> -static int create_mmap_offset(struct drm_i915_gem_object *obj)
> +static void init_mmap_offset(struct drm_i915_gem_object *obj,
> +                            struct i915_mmap_offset *mmo)
> +{
> +       mutex_lock(&obj->mmo_lock);

This lock should only be guarding obj->mmap_offsets. You don't need to
take it around every kref, just be careful to remove the link on close.

> +       kref_init(&mmo->ref);
> +       list_add(&mmo->offset, &obj->mmap_offsets);
> +       mutex_unlock(&obj->mmo_lock);
> +
> +       mmo->obj = obj;
> +}
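
A minimal sketch of the narrower locking suggested above (same field
names as the patch; only the list insertion takes the lock):

    static void init_mmap_offset(struct drm_i915_gem_object *obj,
                                 struct i915_mmap_offset *mmo)
    {
            /* kref and back-pointer setup need no serialisation yet */
            kref_init(&mmo->ref);
            mmo->obj = obj;

            /* mmo_lock guards only the obj->mmap_offsets list */
            mutex_lock(&obj->mmo_lock);
            list_add(&mmo->offset, &obj->mmap_offsets);
            mutex_unlock(&obj->mmo_lock);
    }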

> +/* This overcomes the limitation in drm_gem_mmap's assignment of a
> + * drm_gem_object as the vma->vm_private_data. Since we need to
> + * be able to resolve multiple mmap offsets which could be tied
> + * to a single gem object.
> + */
> +int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> +       struct drm_vma_offset_node *node;
> +       struct drm_file *priv = filp->private_data;
> +       struct drm_device *dev = priv->minor->dev;
> +       struct i915_mmap_offset *mmo = NULL;
> +       struct drm_gem_object *obj = NULL;
> +
> +       if (drm_dev_is_unplugged(dev))
> +               return -ENODEV;
> +
> +       drm_vma_offset_lock_lookup(dev->vma_offset_manager);
> +       node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
> +                                                 vma->vm_pgoff,
> +                                                 vma_pages(vma));
> +       if (likely(node)) {
> +               mmo = container_of(node, struct i915_mmap_offset,
> +                                  vma_node);
> +               /*
> +                * Skip 0-refcnted objects as it is in the process of being
> +                * destroyed and will be invalid when the vma manager lock
> +                * is released.
> +                */
> +               obj = &mmo->obj->base;
> +               if (!kref_get_unless_zero(&obj->refcount))
> +                       obj = NULL;

Hmm, references are still weird. This doesn't seem to protect against:

Thread A			Thread B
  mmap(fd, offset_of_A);	  gem_close(fd, A);


Time for a gem_mmap_gtt/close-race.
-Chris
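
The race could be exercised along these lines (a hedged sketch, not the
actual IGT test; creating the bo and fetching its fake offset are
assumed to have happened already):

    #include <pthread.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/i915_drm.h>

    struct race_args { int fd; uint32_t handle; uint64_t offset; };

    /* Thread A: mmap the object through its fake offset */
    static void *thread_mmap(void *data)
    {
            struct race_args *r = data;
            void *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                             MAP_SHARED, r->fd, r->offset);

            if (ptr != MAP_FAILED)
                    munmap(ptr, 4096);
            return NULL;
    }

    /* Thread B: concurrently close the gem handle */
    static void *thread_close(void *data)
    {
            struct race_args *r = data;
            struct drm_gem_close close_args = { .handle = r->handle };

            ioctl(r->fd, DRM_IOCTL_GEM_CLOSE, &close_args);
            return NULL;
    }

Looping both threads against fresh objects must never let the lookup in
i915_gem_mmap() observe a half-torn-down mmo, however they interleave.
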
Quoting Abdiel Janulgue (2019-08-26 13:20:58)
> @@ -641,10 +642,18 @@ static void revoke_mmaps(struct intel_gt *gt)
>                 GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
>                 node = &vma->obj->base.vma_node;
>                 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
> -               unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
> +
> +               list_for_each_entry(mmo, &vma->obj->mmap_offsets, offset) {
> +                       node = &mmo->vma_node;
> +                       if (!drm_mm_node_allocated(&node->vm_node) ||
> +                           mmo->mmap_type != I915_MMAP_TYPE_GTT)
> +                               continue;

That list needs locking as it is not protected by the reset srcu (and you
are not allowed your own locking in here, unless you have a jolly good
reason and can prove it will never block a reset).

One thing to observe is that you only ever need the mmo associated
with a fence for revocation upon reset, and you could use a local list
for tracking the active mmo.
-Chris
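
One way to picture that local list (a rough sketch; userfault_mmo_list
and userfault_link are hypothetical fields, and partial-range unmapping
is glossed over):

    /* The fault handler already knows which mmo faulted via
     * vm_private_data, so it could publish that mmo on a GGTT-local
     * list when arming the fence, letting the reset path avoid
     * walking obj->mmap_offsets without a lock.
     */
    static void revoke_mmaps(struct intel_gt *gt)
    {
            struct i915_mmap_offset *mmo, *next;

            list_for_each_entry_safe(mmo, next,
                                     &gt->ggtt->userfault_mmo_list,
                                     userfault_link) {
                    drm_vma_node_unmap(&mmo->vma_node,
                                       gt->i915->drm.anon_inode->i_mapping);
                    list_del_init(&mmo->userfault_link);
            }
    }
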
On Mon, Aug 26, 2019 at 2:21 PM Abdiel Janulgue
<abdiel.janulgue@linux.intel.com> wrote:
>
> [...]
> -       ret = create_mmap_offset(obj);
> -       if (ret == 0)
> -               *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
> +       mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);

I got thrown off a bunch of times here reading the code, but I think I've
got it right now.

Why exactly do we want multiple vma offsets? Yes, this makes it a
drop-in replacement for the old cpu mmap ioctl, which was a bit of a
dubious design. But if we go all new here, I really wonder why this is
necessary. No other discrete driver needs this; they all fix the mmap
mode for the lifetime of an object, because flushing stuff is as
expensive as just reallocating (or at least close enough).

I think us once again going our separate route here needs a lot more
justification than just "we've accidentally ended up with uapi like
this 10 years ago".
-Daniel
Quoting Balestrieri, Francesco (2019-09-11 13:03:25)
> On 04/09/2019, 13.33, "Intel-gfx on behalf of Daniel Vetter" <intel-gfx-bounces@lists.freedesktop.org on behalf of daniel@ffwll.ch> wrote:
> 
>     On Mon, Aug 26, 2019 at 2:21 PM Abdiel Janulgue
>     > -       ret = create_mmap_offset(obj);
>     > -       if (ret == 0)
>     > -               *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
>     > +       mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);
>     
>     I got thrown off a bunch of times here reading the code, but I think I've
>     got it right now.
>     
>     Why exactly do we want multiple vma offsets? Yes, this makes it a
>     drop-in replacement for the old cpu mmap ioctl, which was a bit of a
>     dubious design. But if we go all new here, I really wonder why this is
>     necessary. No other discrete driver needs this; they all fix the mmap
>     mode for the lifetime of an object, because flushing stuff is as
>     expensive as just reallocating (or at least close enough).
>     
>     I think us once again going our separate route here needs a lot more
>     justification than just "we've accidentally ended up with uapi like
>     this 10 years ago".

That's exactly the whole point: to replace the uapi we accidentally
ended up with 10 years ago with an api that doesn't cause valgrind to
complain, is easily extensible, and supports all legacy usecases, which
puts us in a very good position to support unknown future usecases as
well. Letting userspace control their mmappings is very powerful, and we
definitely do not want to be limiting their flexibility.

That no other driver even seems to allow multiple mmaps, and so has not
developed a desire to manage multiple vma per object, does not seem to
be a reason to limit ourselves. The infrastructure all supports it; the
only thing that is at odds is the desire to force the lowest common
denominator as the de facto standard.
-Chris
On Wed, Sep 11, 2019 at 2:19 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:
> [...]
> That's exactly the whole point: to replace the uapi we accidentally
> ended up with 10 years ago with an api that doesn't cause valgrind to
> complain, is easily extensible, and supports all legacy usecases, which
> puts us in a very good position to support unknown future usecases as
> well. Letting userspace control their mmappings is very powerful, and we
> definitely do not want to be limiting their flexibility.
>
> That no other driver even seems to allow multiple mmaps, and so has not
> developed a desire to manage multiple vma per object, does not seem to
> be a reason to limit ourselves. The infrastructure all supports it; the
> only thing that is at odds is the desire to force the lowest common
> denominator as the de facto standard.

Just because something is possible (or looks possible at first)
doesn't make it good uapi. That's how we get stuff like gtt mmap on
userptr bo, and then regrets. This entire thing here gets sold as
"uapi cleanup for lmem". Which makes sense, except most of the uapi
isn't really cleaned up at all:

- We still have relocations (and we even made them more powerful by
pipelining them, at a time when all our mesa drivers finally managed
to move to softpin). We could ditch all the reloc stuff for gen12+ and
lmem instead. Both amdgpu and nv50+ have done that.

- We're still adding more uapi that's all adding variable state.
Meanwhile everything else (gallium/iris, vk) moves to invariant state
object models, where you create stuff once with the right properties
and call it a day. lmem does add quite a few new state bits here; it
would be a lot simpler if we could make them as invariant as possible.
For placement we might need to allow changes to the priority, but not
to the placement list itself. For mmap, you just select the mmap mode
that you want (because once you fix caching mode, placement list and
tiling bits, there's really not a choice anymore), and that's the one
you get with the single mmap offset.

Instead we just hang onto all the accumulated complexity, add more,
and the only cleanup I'm seeing is that we bake in the multi-mmap
model to save one single line in userspace for the valgrind
annotation. Which everyone has already anyway; that is not really
going to go away.

I think using lmem to clean up the uapi makes tons of sense, but if we
bother with that it should be a real cleanup. Not just cosmetics for
the one valgrind annotation line in userspace we have right now.
-Daniel
On Thu, Sep 19, 2019 at 3:05 PM Daniel Vetter <daniel@ffwll.ch> wrote:
> [...]
> I think using lmem to clean up the uapi makes tons of sense, but if we
> bother with that it should be a real cleanup. Not just cosmetics for
> the one valgrind annotation line in userspace we have right now.

Chatted with Chris, and one useful thing that multi-mmap gives us (and
which I ignored) is much easier testing of coherency between all the
different views. And we need that because the kernel needs that anyway
(at least cpu wc view against all others is needed for swap-out), and
we need to be able to test it well because our hw is historically
rather buggy in this area. So bummer and lots of sighs :-/
-Daniel
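
A sketch of the kind of coherency check that becomes easy with multiple
offsets (get_mmap_offset() is a hypothetical wrapper; with this patch
both views are GTT, but the same shape extends to per-type offsets):

    #include <assert.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static void check_views(int fd, uint32_t handle, size_t size)
    {
            /* two independent fake offsets for the same bo */
            uint64_t off_a = get_mmap_offset(fd, handle);
            uint64_t off_b = get_mmap_offset(fd, handle);

            uint32_t *a = mmap(NULL, size, PROT_READ | PROT_WRITE,
                               MAP_SHARED, fd, off_a);
            uint32_t *b = mmap(NULL, size, PROT_READ,
                               MAP_SHARED, fd, off_b);

            a[0] = 0xdeadbeef;          /* write through one view... */
            assert(b[0] == 0xdeadbeef); /* ...read through the other */

            munmap(a, size);
            munmap(b, size);
    }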