[1/2] drm/amdgpu/sriov: For sriov runtime, use kiq to invalidate the tlb

Submitted by Deng, Emily on Aug. 15, 2018, 12:54 p.m.

Details

Message ID 1534337675-25039-1-git-send-email-Emily.Deng@amd.com
State New
Series "Series without cover letter" ( rev: 1 ) in AMD X.Org drivers


Commit Message

Deng, Emily Aug. 15, 2018, 12:54 p.m.
To avoid the TLB flush being interrupted by a world switch, use the KIQ
and a single command to do the TLB invalidation.

v2:
Add firmware version checking.

v3:
Refine the code, and move the firmware
checking into gfx_v9_0_ring_emit_reg_write_reg_wait.

v4:
Change the name amdgpu_kiq_invalidate_tlb to amdgpu_kiq_reg_write_reg_wait.
Remove the in_interrupt.
Refine code.

SWDEV-161497

Signed-off-by: Emily Deng <Emily.Deng@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h      |  4 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c |  3 --
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c    | 15 +++++++-
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c    | 60 ++++++++++++++++++++++++++++++++
 4 files changed, 78 insertions(+), 4 deletions(-)


diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6265b88..19ef7711 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -212,6 +212,10 @@  enum amdgpu_kiq_irq {
 	AMDGPU_CP_KIQ_IRQ_LAST
 };
 
+#define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
+#define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
+#define MAX_KIQ_REG_TRY 20
+
 int amdgpu_device_ip_set_clockgating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_clockgating_state state);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 21adb1b6..3885636 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -22,9 +22,6 @@ 
  */
 
 #include "amdgpu.h"
-#define MAX_KIQ_REG_WAIT	5000 /* in usecs, 5ms */
-#define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 20
 
 uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 76d979e..e010166 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4348,8 +4348,21 @@  static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
 						  uint32_t ref, uint32_t mask)
 {
 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+	struct amdgpu_device *adev = ring->adev;
+	bool fw_version_ok = false;
 
-	if (amdgpu_sriov_vf(ring->adev))
+	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
+		if ((adev->gfx.me_fw_version >= 0x0000009c) && (adev->gfx.me_feature_version >= 42)
+			&& (adev->gfx.pfp_fw_version >=  0x000000b1) && (adev->gfx.pfp_feature_version >= 42))
+				fw_version_ok = true;
+	} else {
+		if ((adev->gfx.mec_fw_version >=  0x00000193) && (adev->gfx.mec_feature_version >= 42))
+			fw_version_ok = true;
+	}
+
+	fw_version_ok = (adev->asic_type == CHIP_VEGA10) ? fw_version_ok : false;
+
+	if (amdgpu_sriov_vf(adev) && fw_version_ok)
 		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
 				      ref, mask, 0x20);
 	else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index ed467de..9726c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -311,6 +311,59 @@  static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
 	return req;
 }
 
+signed long  amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev, struct amdgpu_vmhub *hub,
+		unsigned eng, u32 req, uint32_t vmid)
+{
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	if (!ring->ready) {
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
+					    hub->vm_inv_eng0_ack + eng,
+					    req, 1 << vmid);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* don't wait anymore for gpu reset case because this way may
+	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
+	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+	 * never return if we keep waiting in virt_kiq_rreg, which cause
+	 * gpu_recover() hang there.
+	 *
+	 * also don't wait anymore for IRQ context
+	 * */
+	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+		goto failed_kiq;
+
+	might_sleep();
+
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq;
+
+	return 0;
+
+failed_kiq:
+	pr_err("failed to invalidate tlb with kiq\n");
+	return r;
+}
+
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -332,6 +385,7 @@  static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
 	/* Use register 17 for GART */
 	const unsigned eng = 17;
 	unsigned i, j;
+	int r;
 
 	spin_lock(&adev->gmc.invalidate_lock);
 
@@ -339,6 +393,12 @@  static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
 		struct amdgpu_vmhub *hub = &adev->vmhub[i];
 		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
 
+		if (amdgpu_sriov_vf(adev)) {
+			r = amdgpu_kiq_reg_write_reg_wait(adev, hub, eng, tmp, vmid);
+			if (!r)
+				continue;
+		}
+
 		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
 
 		/* Busy wait for ACK.*/

Comments

On 15.08.2018 at 14:54, Emily Deng wrote:
> To avoid the TLB flush being interrupted by a world switch, use the KIQ
> and a single command to do the TLB invalidation.
>
> v2:
> Add firmware version checking.
>
> v3:
> Refine the code, and move the firmware
> checking into gfx_v9_0_ring_emit_reg_write_reg_wait.
>
> v4:
> Change the name amdgpu_kiq_invalidate_tlb to amdgpu_kiq_reg_write_reg_wait.
> Remove the in_interrupt.
> Refine code.
>
> SWDEV-161497
>
> Signed-off-by: Emily Deng <Emily.Deng@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu.h      |  4 +++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c |  3 --
>   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c    | 15 +++++++-
>   drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c    | 60 ++++++++++++++++++++++++++++++++
>   4 files changed, 78 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 6265b88..19ef7711 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -212,6 +212,10 @@ enum amdgpu_kiq_irq {
>   	AMDGPU_CP_KIQ_IRQ_LAST
>   };
>   
> +#define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
> +#define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
> +#define MAX_KIQ_REG_TRY 20
> +
>   int amdgpu_device_ip_set_clockgating_state(void *dev,
>   					   enum amd_ip_block_type block_type,
>   					   enum amd_clockgating_state state);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> index 21adb1b6..3885636 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> @@ -22,9 +22,6 @@
>    */
>   
>   #include "amdgpu.h"
> -#define MAX_KIQ_REG_WAIT	5000 /* in usecs, 5ms */
> -#define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
> -#define MAX_KIQ_REG_TRY 20
>   
>   uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
>   {
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> index 76d979e..e010166 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> @@ -4348,8 +4348,21 @@ static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
>   						  uint32_t ref, uint32_t mask)
>   {
>   	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
> +	struct amdgpu_device *adev = ring->adev;
> +	bool fw_version_ok = false;
>   
> -	if (amdgpu_sriov_vf(ring->adev))
> +	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
> +		if ((adev->gfx.me_fw_version >= 0x0000009c) && (adev->gfx.me_feature_version >= 42)
> +			&& (adev->gfx.pfp_fw_version >=  0x000000b1) && (adev->gfx.pfp_feature_version >= 42))
> +				fw_version_ok = true;

The indentation here is wrong and the && belongs at the end of the line.
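
For illustration only, a sketch of the requested style (reusing the version
thresholds from the patch), with the && operators at the end of each line and
the body indented by one level:

	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
		if (adev->gfx.me_fw_version >= 0x0000009c &&
		    adev->gfx.me_feature_version >= 42 &&
		    adev->gfx.pfp_fw_version >= 0x000000b1 &&
		    adev->gfx.pfp_feature_version >= 42)
			fw_version_ok = true;
	} else {
		if (adev->gfx.mec_fw_version >= 0x00000193 &&
		    adev->gfx.mec_feature_version >= 42)
			fw_version_ok = true;
	}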

> +	} else {
> +		if ((adev->gfx.mec_fw_version >=  0x00000193) && (adev->gfx.mec_feature_version >= 42))
> +			fw_version_ok = true;
> +	}
> +
> +	fw_version_ok = (adev->asic_type == CHIP_VEGA10) ? fw_version_ok : false;
> +
> +	if (amdgpu_sriov_vf(adev) && fw_version_ok)

Please squash the second patch into this one. The goal is to use the 
same code path on both SRIOV and bare metal.

I would rather separate out this change into its own patch and then make
the TLB invalidation the second patch.

>   		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
>   				      ref, mask, 0x20);
>   	else
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> index ed467de..9726c7e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> @@ -311,6 +311,59 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
>   	return req;
>   }
>   
> +signed long  amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev, struct amdgpu_vmhub *hub,
> +		unsigned eng, u32 req, uint32_t vmid)

That is still specialized, e.g. you assume that you work with a VMHUB 
here instead of specifying the registers, req and value separately.
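
A possible reading of that suggestion, sketched here for illustration (this
generic form is not part of the posted patch; the body is the same as above,
only the parameters change):

/* Generic KIQ write-and-wait: the caller supplies both register offsets
 * plus the write value and wait mask, so the helper no longer needs to
 * know anything about struct amdgpu_vmhub.
 */
signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					  uint32_t reg0, uint32_t reg1,
					  uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	if (!ring->ready)
		return -EINVAL;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1, ref, mask);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	/* Don't keep waiting during GPU reset or in IRQ context, for the
	 * same reasons given in the comment in the patch above.
	 */
	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return 0;

failed_kiq:
	pr_err("failed to write reg %x and wait reg %x\n", reg0, reg1);
	return r;
}

The GMC code would then keep the VMHUB-specific knowledge on its side:

	r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
					  hub->vm_inv_eng0_ack + eng,
					  tmp, 1 << vmid);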

> +{
> +	signed long r, cnt = 0;
> +	unsigned long flags;
> +	uint32_t seq;
> +	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
> +	struct amdgpu_ring *ring = &kiq->ring;
> +
> +	if (!ring->ready) {
> +		return -EINVAL;
> +	}

Please drop the {} here.

Christian.
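
For reference, kernel coding style writes the single-statement branch without
braces:

	if (!ring->ready)
		return -EINVAL;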

> +
> +	spin_lock_irqsave(&kiq->ring_lock, flags);
> +
> +	amdgpu_ring_alloc(ring, 32);
> +	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
> +					    hub->vm_inv_eng0_ack + eng,
> +					    req, 1 << vmid);
> +	amdgpu_fence_emit_polling(ring, &seq);
> +	amdgpu_ring_commit(ring);
> +	spin_unlock_irqrestore(&kiq->ring_lock, flags);
> +
> +	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
> +
> +	/* don't wait anymore for gpu reset case because this way may
> +	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
> +	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
> +	 * never return if we keep waiting in virt_kiq_rreg, which cause
> +	 * gpu_recover() hang there.
> +	 *
> +	 * also don't wait anymore for IRQ context
> +	 * */
> +	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
> +		goto failed_kiq;
> +
> +	might_sleep();
> +
> +	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
> +		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
> +		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
> +	}
> +
> +	if (cnt > MAX_KIQ_REG_TRY)
> +		goto failed_kiq;
> +
> +	return 0;
> +
> +failed_kiq:
> +	pr_err("failed to invalidate tlb with kiq\n");
> +	return r;
> +}
> +
>   /*
>    * GART
>    * VMID 0 is the physical GPU addresses as used by the kernel.
> @@ -332,6 +385,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
>   	/* Use register 17 for GART */
>   	const unsigned eng = 17;
>   	unsigned i, j;
> +	int r;
>   
>   	spin_lock(&adev->gmc.invalidate_lock);
>   
> @@ -339,6 +393,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
>   		struct amdgpu_vmhub *hub = &adev->vmhub[i];
>   		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
>   
> +		if (amdgpu_sriov_vf(adev)) {
> +			r = amdgpu_kiq_reg_write_reg_wait(adev, hub, eng, tmp, vmid);
> +			if (!r)
> +				continue;
> +		}
> +
>   		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
>   
>   		/* Busy wait for ACK.*/