[libdrm,1/2] amdgpu: prevent an integer wraparound of cpu_map_count

Submitted by Marek Olšák on Oct. 23, 2018, 7:07 p.m.

Details

Message ID 20181023190733.15251-1-maraeo@gmail.com
State New
Series "Series without cover letter" ( rev: 1 ) in AMD X.Org drivers


Commit Message

Marek Olšák Oct. 23, 2018, 7:07 p.m.
From: Marek Olšák <marek.olsak@amd.com>

---
 amdgpu/amdgpu_bo.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)


diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index c0f42e81..81f8a5f7 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -22,20 +22,21 @@ 
  *
  */
 
 #include <stdlib.h>
 #include <stdio.h>
 #include <stdint.h>
 #include <string.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <unistd.h>
+#include <limits.h>
 #include <sys/ioctl.h>
 #include <sys/mman.h>
 #include <sys/time.h>
 
 #include "libdrm_macros.h"
 #include "xf86drm.h"
 #include "amdgpu_drm.h"
 #include "amdgpu_internal.h"
 #include "util_math.h"
 
@@ -442,21 +443,29 @@  drm_public int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
 {
 	union drm_amdgpu_gem_mmap args;
 	void *ptr;
 	int r;
 
 	pthread_mutex_lock(&bo->cpu_access_mutex);
 
 	if (bo->cpu_ptr) {
 		/* already mapped */
 		assert(bo->cpu_map_count > 0);
-		bo->cpu_map_count++;
+
+		/* If the counter has already reached INT_MAX, don't increment
+		 * it and assume that the buffer will be mapped indefinitely.
+		 * The buffer is pretty unlikely to get unmapped by the user
+		 * at this point.
+		 */
+		if (bo->cpu_map_count != INT_MAX)
+			bo->cpu_map_count++;
+
 		*cpu = bo->cpu_ptr;
 		pthread_mutex_unlock(&bo->cpu_access_mutex);
 		return 0;
 	}
 
 	assert(bo->cpu_map_count == 0);
 
 	memset(&args, 0, sizeof(args));
 
 	/* Query the buffer address (args.addr_ptr).
@@ -492,21 +501,27 @@  drm_public int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
 
 	pthread_mutex_lock(&bo->cpu_access_mutex);
 	assert(bo->cpu_map_count >= 0);
 
 	if (bo->cpu_map_count == 0) {
 		/* not mapped */
 		pthread_mutex_unlock(&bo->cpu_access_mutex);
 		return -EINVAL;
 	}
 
-	bo->cpu_map_count--;
+	/* If the counter has already reached INT_MAX, don't decrement it.
+	 * This is because amdgpu_bo_cpu_map doesn't increment it past
+	 * INT_MAX.
+	 */
+	if (bo->cpu_map_count != INT_MAX)
+		bo->cpu_map_count--;
+
 	if (bo->cpu_map_count > 0) {
 		/* mapped multiple times */
 		pthread_mutex_unlock(&bo->cpu_access_mutex);
 		return 0;
 	}
 
 	r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
 	bo->cpu_ptr = NULL;
 	pthread_mutex_unlock(&bo->cpu_access_mutex);
 	return r;

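For readers less familiar with this code path, here is a minimal caller-side sketch (not part of the patch) of the reference-counting behavior the change guards, assuming a valid amdgpu_bo_handle obtained elsewhere and with most error handling omitted:

#include <assert.h>
#include <amdgpu.h>	/* libdrm_amdgpu public API */

static void map_count_example(amdgpu_bo_handle bo)
{
	void *ptr1 = NULL, *ptr2 = NULL;

	/* First map: libdrm performs the actual mmap and sets
	 * cpu_map_count to 1. */
	if (amdgpu_bo_cpu_map(bo, &ptr1))
		return;

	/* Second map: no new mmap; the cached cpu_ptr is returned and the
	 * count goes to 2.  With this patch the count saturates at INT_MAX
	 * instead of wrapping around. */
	if (amdgpu_bo_cpu_map(bo, &ptr2) == 0)
		assert(ptr1 == ptr2);

	/* Each unmap decrements the count; the real munmap only happens
	 * once it drops back to 0. */
	amdgpu_bo_cpu_unmap(bo);
	amdgpu_bo_cpu_unmap(bo);
}
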
Comments

On 10/24/18 3:07 AM, Marek Olšák wrote:
> From: Marek Olšák <marek.olsak@amd.com>

We need a commit log and a sign-off here.

BTW, have you encountered any issues with that?

>
> ---
>   amdgpu/amdgpu_bo.c | 19 +++++++++++++++++--
>   1 file changed, 17 insertions(+), 2 deletions(-)
>
> diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
> index c0f42e81..81f8a5f7 100644
> --- a/amdgpu/amdgpu_bo.c
> +++ b/amdgpu/amdgpu_bo.c
> @@ -22,20 +22,21 @@
>    *
>    */
>   
>   #include <stdlib.h>
>   #include <stdio.h>
>   #include <stdint.h>
>   #include <string.h>
>   #include <errno.h>
>   #include <fcntl.h>
>   #include <unistd.h>
> +#include <limits.h>
>   #include <sys/ioctl.h>
>   #include <sys/mman.h>
>   #include <sys/time.h>
>   
>   #include "libdrm_macros.h"
>   #include "xf86drm.h"
>   #include "amdgpu_drm.h"
>   #include "amdgpu_internal.h"
>   #include "util_math.h"
>   
> @@ -442,21 +443,29 @@ drm_public int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
>   {
>   	union drm_amdgpu_gem_mmap args;
>   	void *ptr;
>   	int r;
>   
>   	pthread_mutex_lock(&bo->cpu_access_mutex);
>   
>   	if (bo->cpu_ptr) {
>   		/* already mapped */
>   		assert(bo->cpu_map_count > 0);
> -		bo->cpu_map_count++;
> +
> +		/* If the counter has already reached INT_MAX, don't increment
> +		 * it and assume that the buffer will be mapped indefinitely.
> +		 * The buffer is pretty unlikely to get unmapped by the user
> +		 * at this point.
> +		 */
> +		if (bo->cpu_map_count != INT_MAX)
> +			bo->cpu_map_count++;

If so, shall we print some error here to note that an indefinite mapping 
has come up?

Regards,
Jerry
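
A standalone sketch of what that might look like (hypothetical; this warning is not part of the posted patch, the helper name is made up, and Marek argues below that saturation is expected usage):

#include <limits.h>
#include <stdio.h>

/* Saturating increment with a notice once the counter pegs at INT_MAX. */
static void map_count_saturating_inc(int *count)
{
	if (*count != INT_MAX)
		(*count)++;
	else
		fprintf(stderr,
			"amdgpu: cpu_map_count saturated at INT_MAX, "
			"buffer stays mapped indefinitely\n");
}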
That looks really ugly to me. Mapping the same BO so often is illegal 
and should be handled as an error.

Otherwise we will never be able to cleanly recover from a GPU lockup 
with lost state by reloading the client library.

Christian.

On Tue, Oct 23, 2018 at 10:38 PM Zhang, Jerry(Junwei) <Jerry.Zhang@amd.com>
wrote:

> On 10/24/18 3:07 AM, Marek Olšák wrote:
> > From: Marek Olšák <marek.olsak@amd.com>
>
> We need commit log and sign-off here.
>
> BTW, have you encounter any issue about that?
>

I don't know what you mean. I'm pretty sure that a sign-off is not needed
for libdrm.


>
> If so, shall we print some error here to notice that indefinite mappings
> come up.
>

No error. This is expected usage.

Marek
You and I discussed this extensively internally a while ago. It's expected
and correct behavior. Mesa doesn't unmap some buffers and never will.

Marek

On Wed, Oct 24, 2018 at 3:45 AM Christian König <
ckoenig.leichtzumerken@gmail.com> wrote:

> That looks really ugly to me. Mapping the same BO so often is illegal
> and should be handled as error.
>
> Otherwise we will never be able to cleanly recover from a GPU lockup
> with lost state by reloading the client library.
>
> Christian.
On 2018-10-29 10:15 p.m., Marek Olšák wrote:
> You and I discussed this extensively internally a while ago. It's expected
> and correct behavior. Mesa doesn't unmap some buffers and never will.

It doesn't need to keep mapping the same buffer over and over again
though, does it?
On 30.10.18 at 09:20, Michel Dänzer wrote:
> On 2018-10-29 10:15 p.m., Marek Olšák wrote:
>> You and I discussed this extensively internally a while ago. It's expected
>> and correct behavior. Mesa doesn't unmap some buffers and never will.

Ah! Now I at least remember what issue that one was good for.

> It doesn't need to keep mapping the same buffer over and over again
> though, does it?

Yeah, that's a bit unclear to me as well.

Christian.
On Tue, Oct 30, 2018, 4:20 AM Michel Dänzer <michel@daenzer.net> wrote:

> On 2018-10-29 10:15 p.m., Marek Olšák wrote:
> > You and I discussed this extensively internally a while ago. It's
> expected
> > and correct behavior. Mesa doesn't unmap some buffers and never will.
>
> It doesn't need to keep mapping the same buffer over and over again
> though, does it?
>

It doesn't map it again. It just doesn't unmap, so the next map call just
returns the pointer. It's correct to stop the counter wraparound.

Marek


On Tue, Oct 30, 2018, 11:49 AM Marek Olšák <maraeo@gmail.com> wrote:

>
>
> On Tue, Oct 30, 2018, 4:20 AM Michel Dänzer <michel@daenzer.net> wrote:
>
>> On 2018-10-29 10:15 p.m., Marek Olšák wrote:
>> > You and I discussed this extensively internally a while ago. It's
>> expected
>> > and correct behavior. Mesa doesn't unmap some buffers and never will.
>>
>> It doesn't need to keep mapping the same buffer over and over again
>> though, does it?
>>
>
> It doesnt map it again. It just doesnt unmap. So the next map call just
> returns the pointer. It's correct to stop the counter wraparound.
>

Mesa doesn't track whether a buffer is already mapped. Libdrm tracks that.
It's a feature of libdrm to return the same pointer and expect an infinite
number of map calls.

Marek


On Tue, Oct 30, 2018, 11:52 AM Marek Olšák <maraeo@gmail.com> wrote:

>
>
> On Tue, Oct 30, 2018, 11:49 AM Marek Olšák <maraeo@gmail.com> wrote:
>
>>
>>
>> On Tue, Oct 30, 2018, 4:20 AM Michel Dänzer <michel@daenzer.net> wrote:
>>
>>> On 2018-10-29 10:15 p.m., Marek Olšák wrote:
>>> > You and I discussed this extensively internally a while ago. It's
>>> expected
>>> > and correct behavior. Mesa doesn't unmap some buffers and never will.
>>>
>>> It doesn't need to keep mapping the same buffer over and over again
>>> though, does it?
>>>
>>
>> It doesnt map it again. It just doesnt unmap. So the next map call just
>> returns the pointer. It's correct to stop the counter wraparound.
>>
>
> Mesa doesn't track whether a buffer is already mapped. Libdrm tracks that.
> It's a feature of libdrm to return the same pointer and expect infinite
> number of map calls.
>

Mesa has had this optimization for 8 years (since the radeon winsys).
It's surprising that it surprises you now.

Marek



On 2018-10-30 4:52 p.m., Marek Olšák wrote:
> On Tue, Oct 30, 2018, 11:49 AM Marek Olšák <maraeo@gmail.com> wrote:
>> On Tue, Oct 30, 2018, 4:20 AM Michel Dänzer <michel@daenzer.net> wrote:
>>
>>> On 2018-10-29 10:15 p.m., Marek Olšák wrote:
>>>> You and I discussed this extensively internally a while ago. It's
>>> expected
>>>> and correct behavior. Mesa doesn't unmap some buffers and never will.
>>>
>>> It doesn't need to keep mapping the same buffer over and over again
>>> though, does it?
>>>
>>
>> It doesnt map it again. It just doesnt unmap. So the next map call just
>> returns the pointer. It's correct to stop the counter wraparound.
>>
> 
> Mesa doesn't track whether a buffer is already mapped. Libdrm tracks that.
> It's a feature of libdrm to return the same pointer and expect infinite
> number of map calls.

That's not what the reference counting in libdrm is intended for. It's
for keeping track of how many independent callers have mapped the
buffer. Mesa should remember that it mapped a buffer and not map it again.
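
To illustrate the usage Michel is describing, a caller can remember its own mapping instead of calling amdgpu_bo_cpu_map() again; a minimal sketch, with the wrapper struct and function names made up for this example:

#include <amdgpu.h>

/* Hypothetical caller-side wrapper: map once, cache the pointer, and
 * never re-enter amdgpu_bo_cpu_map() for the same buffer. */
struct my_buffer {
	amdgpu_bo_handle bo;
	void *cpu_ptr;		/* NULL until the first map */
};

static void *my_buffer_map(struct my_buffer *buf)
{
	if (!buf->cpu_ptr &&
	    amdgpu_bo_cpu_map(buf->bo, &buf->cpu_ptr) != 0)
		return NULL;	/* mapping failed */

	/* libdrm's cpu_map_count stays at 1, so it can never wrap. */
	return buf->cpu_ptr;
}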
On 30.10.18 at 16:59, Michel Dänzer wrote:
> On 2018-10-30 4:52 p.m., Marek Olšák wrote:
>> On Tue, Oct 30, 2018, 11:49 AM Marek Olšák <maraeo@gmail.com> wrote:
>>> On Tue, Oct 30, 2018, 4:20 AM Michel Dänzer <michel@daenzer.net> wrote:
>>>> On 2018-10-29 10:15 p.m., Marek Olšák wrote:
>>>>> You and I discussed this extensively internally a while ago. It's expected
>>>>> and correct behavior. Mesa doesn't unmap some buffers and never will.
>>>> It doesn't need to keep mapping the same buffer over and over again
>>>> though, does it?
>>> It doesnt map it again. It just doesnt unmap. So the next map call just
>>> returns the pointer. It's correct to stop the counter wraparound.
>> Mesa doesn't track whether a buffer is already mapped. Libdrm tracks that.
>> It's a feature of libdrm to return the same pointer and expect infinite
>> number of map calls.
> That's not what the reference counting in libdrm is intended for. It's
> for keeping track of how many independent callers have mapped the
> buffer. Mesa should remember that it mapped a buffer and not map it again.

Well, if Mesa just wants to query the existing mapping, then why not add an
amdgpu_bo_get_cpu_ptr() which just queries whether a CPU mapping exists and,
if yes, returns the appropriate pointer or NULL otherwise?

I mean, when we want to abstract everything in libdrm, then we just need
to add the functions we need to use this abstraction.

Christian.
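
For illustration, a minimal sketch of what such a query-only helper could look like (hypothetical: this function does not exist in libdrm, the name is only the one proposed above, and the body is written against the internal struct amdgpu_bo fields used in the patch):

drm_public void *amdgpu_bo_get_cpu_ptr(amdgpu_bo_handle bo)
{
	void *ptr;

	pthread_mutex_lock(&bo->cpu_access_mutex);
	/* Return the cached mapping without touching cpu_map_count;
	 * NULL means the buffer is not currently mapped. */
	ptr = bo->cpu_ptr;
	pthread_mutex_unlock(&bo->cpu_access_mutex);

	return ptr;
}

Such a helper would let a caller reuse an existing mapping without bumping the reference count at all.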
On Wed, Oct 31, 2018 at 3:59 AM Koenig, Christian <Christian.Koenig@amd.com>
wrote:

> Well if Mesa just wants to query the existing mapping then why not add a
> amdgpu_bo_get_cpu_ptr() which just queries if a CPU mapping exists and
> if yes returns the appropriate pointer or NULL otherwise?
>
> I mean when we want to abstract everything in libdrm then we just need
> to add the functions we need to use this abstraction.
>

That can be future work for the sake of cleanliness and clarity, but it
would be a waste of time and wouldn't help old Mesa.

Marek
On 31.10.18 at 23:12, Marek Olšák wrote:
> That can be future work for the sake of cleanliness and clarity, but it would be a waste of time and wouldn't help old Mesa.

That it doesn't help old Mesa is unfortunate, but this is clearly a bug in Mesa.

If old Mesa is broken then we should fix it by updating it and not add workarounds for specific clients in libdrm.

Regards,
Christian.


On Fri, Nov 2, 2018 at 3:39 AM Koenig, Christian <Christian.Koenig@amd.com>
wrote:

> That can be future work for the sake of cleanliness and clarity, but it
> would be a waste of time and wouldn't help old Mesa.
>
>
> That it doesn't help old Mesa is unfortunate, but this is clearly a bug in
> Mesa.
>
> If old Mesa is broken then we should fix it by updating it and not add
> workarounds for specific clients in libdrm.
>

It's not a workaround. We made a decision with amdgpu to share code by
moving portions of the Mesa winsys into libdrm. The map_count is part of
that. It's highly desirable to continue with code sharing. There is nothing
broken with Mesa. Mesa won't check whether a buffer is already mapped.
That's the responsibility of libdrm as part of code sharing and we don't
want to duplicate the same logic in Mesa. It's all part of the intended
design.

Marek