max-texture-size: test non-proxy targets with max size from proxy test

Submitted by Brian Paul on Feb. 7, 2014, 11:34 p.m.

Details

Message ID 1391816052-11773-1-git-send-email-brianp@vmware.com
State New


Commit Message

Brian Paul Feb. 7, 2014, 11:34 p.m.
Save the max texture size found with the proxy targets.  Then use
that max size when we test the regular/non-proxy targets with
glTexImage() and glTexSubImage().

The whole point of proxy textures is to be able to probe the maximum
texture size.  So let's use that size when we try the real textures.
That's what an application would typically do.
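
A minimal sketch of that kind of probe (assuming the 3D case and a
hypothetical GL_RGBA8 format; the test's actual loop is more general):

  GLint w, size, maxSide = 0;
  for (size = 1; size <= 16384; size *= 2) {
          /* Proxy targets validate a size without allocating storage. */
          glTexImage3D(GL_PROXY_TEXTURE_3D, 0, GL_RGBA8, size, size, size,
                       0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
          glGetTexLevelParameteriv(GL_PROXY_TEXTURE_3D, 0,
                                   GL_TEXTURE_WIDTH, &w);
          if (w == 0)
                  break;          /* the GL rejected this size */
          maxSide = size;         /* largest size accepted so far */
  }
  /* maxSide is what the app would pass to the real glTexImage3D() call. */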

As it was, most of the GL_TEXTURE_3D tests were returning 'skip'
results because we couldn't allocate a 2048^3 or 1024^3 texture.
Now we should get pass/fail/crash when we try creating an N^3
texture when OpenGL told us that N should work.

This patch also renames the global target and internalformat arrays
to be plural.
---
 tests/texturing/max-texture-size.c |   73 +++++++++++++++++++++++++++++++-----
 1 file changed, 64 insertions(+), 9 deletions(-)


diff --git a/tests/texturing/max-texture-size.c b/tests/texturing/max-texture-size.c
index 45e01c9..6a6140f 100644
--- a/tests/texturing/max-texture-size.c
+++ b/tests/texturing/max-texture-size.c
@@ -52,7 +52,7 @@  PIGLIT_GL_TEST_CONFIG_BEGIN
 
 PIGLIT_GL_TEST_CONFIG_END
 
-static const GLenum target[] = {
+static const GLenum targets[] = {
 	GL_TEXTURE_1D,
 	GL_TEXTURE_2D,
 	GL_TEXTURE_RECTANGLE,
@@ -60,7 +60,7 @@  static const GLenum target[] = {
 	GL_TEXTURE_3D,
 };
 
-static const GLenum internalformat[] = {
+static const GLenum internalformats[] = {
 	GL_RGBA8,
 	GL_RGBA16,
 	GL_RGBA32F,
@@ -157,6 +157,59 @@  initTexData(GLenum target, uint64_t sideLength)
 	return ((GLfloat *) calloc(nPixels * COLOR_COMPONENTS, sizeof(float)));
 }
 
+/** Convert the given format to an index */
+static int
+format_to_index(GLenum format)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(internalformats); i++) {
+		if (format == internalformats[i])
+			return i;
+	}
+	assert(!"Unexpected format");
+	return 0;
+}
+
+/** Convert the given target to an index */
+static int
+target_to_index(GLenum target)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(targets); i++) {
+		if (target == targets[i])
+			return i;
+	}
+	assert(!"Unexpected target");
+	return 0;
+}
+
+/** to save max texture size info */
+static int max_texture_size[ARRAY_SIZE(targets)][ARRAY_SIZE(internalformats)];
+
+/**
+ * Save the max side value for the given target/format.
+ * This is done when testing the proxy targets.
+ */
+static void
+save_max_side(GLenum target, GLenum format, int maxSide)
+{
+	int tex = target_to_index(target), fmt = format_to_index(format);
+	max_texture_size[tex][fmt] = maxSide;
+}
+
+/**
+ * Get the max side value for the given target/format.
+ * This is done when testing the non-proxy targets.
+ */
+static int
+get_max_side(GLenum target, GLenum format)
+{
+	int tex = target_to_index(target), fmt = format_to_index(format);
+	int maxSide = max_texture_size[tex][fmt];
+	assert(maxSide);
+	return maxSide;
+}
+
 static void
 test_proxy_texture_size(GLenum target, GLenum internalformat)
 {
@@ -221,6 +274,8 @@  test_proxy_texture_size(GLenum target, GLenum internalformat)
 		result = PIGLIT_PASS;
 	}
 
+	save_max_side(target, internalformat, maxSide);
+
 	piglit_report_subtest_result(result, "%s-%s",
 				     piglit_get_gl_enum_name(getProxyTarget(target)),
 				     piglit_get_gl_enum_name(internalformat));
@@ -257,8 +312,8 @@  test_non_proxy_texture_size(GLenum target, GLenum internalformat)
 	glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
 	glTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
 
-	/* Query the largest supported texture size */
-	glGetIntegerv(getMaxTarget(target), &maxSide);
+	/* Use the max texture size which was found when testing the proxy targets */
+	maxSide = get_max_side(target, internalformat);
 
 	printf("%s, Internal Format = %s, Largest Texture Size = %d\n",
 	       piglit_get_gl_enum_name(target),
@@ -347,16 +402,16 @@  static void
 for_targets_and_formats(void(*test)(GLenum, GLenum))
 {
 	int i, j;
-	for (i = 0; i < ARRAY_SIZE(target); i++) {
-		for (j = 0; j < ARRAY_SIZE(internalformat); j++) {
+	for (i = 0; i < ARRAY_SIZE(targets); i++) {
+		for (j = 0; j < ARRAY_SIZE(internalformats); j++) {
 			/* Skip floating point formats if
 			 * GL_ARB_texture_float is not supported
 			 */
-			if ((internalformat[j] == GL_RGBA16F ||
-			    internalformat[j] == GL_RGBA32F) &&
+			if ((internalformats[j] == GL_RGBA16F ||
+			    internalformats[j] == GL_RGBA32F) &&
 			    !piglit_is_extension_supported("GL_ARB_texture_float"))
 				continue;
-			 test(target[i], internalformat[j]);
+			 test(targets[i], internalformats[j]);
 		}
 	}
 }

Comments

On 02/07/2014 03:34 PM, Brian Paul wrote:
> Save the max texture size found with the proxy targets.  Then use
> that max size when we test the regular/non-proxy targets with
> glTexImage() and glTexSubImage().
> 
> The whole point of proxy textures is to be able to probe the maximum
> texture size.  So let's use that size when we try the real textures.
> That's what an application would typically do.
> 
> As it was, most of the GL_TEXTURE_3D tests were returning 'skip'
> results because we couldn't allocate a 2048^3 or 1024^3 texture.
> Now we should get pass/fail/crash when we try creating an N^3
> texture when OpenGL told us that N should work.

Which hardware, if any, have you tried this on?  Any closed source drivers?

On 02/10/2014 07:49 PM, Ian Romanick wrote:
> Which hardware, if any, have you tried this on?  Any closed source drivers?

Yes, NVIDIA's driver.  The test behaves the same way before and after
this change.  NVIDIA's proxy texture tests always pass for the max
advertised texture size.  Ex: it happily says a 2048^3 x RGBA32F 3D
texture is doable.  But then our call to calloc() fails and the test
just reports 'skip'.

But I've also found that this test (on NVIDIA) is sensitive to whatever
else might be running.  In my first run I also had several VMs running
on my system (using a fair amount of RAM and VRAM), and max-texture-size
hung my system when it was testing a 16384 RGBA32F cube map.

I'm curious what AMD's driver does.

-Brian
On 02/11/2014 07:59 AM, Brian Paul wrote:
> On 02/10/2014 07:49 PM, Ian Romanick wrote:
>> On 02/07/2014 03:34 PM, Brian Paul wrote:
>>> Save the max texture size found with the proxy targets.  Then use
>>> that max size when we test the regular/non-proxy targets with
>>> glTexImage() and glTexSubImage().
>>>
>>> The whole point of proxy textures is to be able to probe the maximum
>>> texture size.  So let's use that size when we try the real textures.
>>> That's what an application would typically do.

Right now, the proxy cases are split out as separate subtests...but they
don't really test much...just that you don't get a GL error.  I suppose
they could also check that the value obtained via proxy textures is <=
the advertised maximum.  Not sure how valuable that would be.
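
Something like this, say (assuming maxSide holds the proxy-probed size
and reusing the test's getMaxTarget() helper; untested):

  GLint advertised;
  glGetIntegerv(getMaxTarget(target), &advertised);
  /* A proxy-probed size larger than the advertised max is suspect. */
  if (maxSide > advertised)
          result = PIGLIT_FAIL;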

But maybe we should drop the "test" aspect of the proxy texture code and
just use it as a mechanism to figure out what size to try in the "real"
tests...

>>> As it was, most of the GL_TEXTURE_3D tests were returning 'skip'
>>> results because we couldn't allocate a 2048^3 or 1024^3 texture.
>>> Now we should get pass/fail/crash when we try creating an N^3
>>> texture when OpenGL told us that N should work.
>>
>> Which hardware, if any, have you tried this on?  Any closed source
>> drivers?
> 
> Yes, NVIDIA's driver.  The test behaves the same way before and after
> this change.  NVIDIA's proxy texture tests always pass for the max
> advertised texture size.  Ex: it happily says a 2048^3 x RGBA32F 3D
> texture is doable.  But then our call to calloc() fails and the test
> just reports 'skip'.

I'm not clear whether we actually want to calloc data to pass to
TexImage.  It seems like passing NULL ought to be sufficient, since the
driver should still allocate storage for it (so it could be populated
via rendering)...but maybe that could be deferred.

It seems like calloc'ing a huge amount of data just increases the
likelihood of GL_OUT_OF_MEMORY errors....
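
Roughly (2D case; internalformat and maxSide as in the existing test,
and whether the driver commits memory up front is driver-dependent):

  /* No client-side buffer at all; the driver just reserves storage. */
  glTexImage2D(GL_TEXTURE_2D, 0, internalformat, maxSide, maxSide, 0,
               GL_RGBA, GL_FLOAT, NULL);
  /* Out-of-memory at the maximum size is a legal, non-crash outcome. */
  if (glGetError() == GL_OUT_OF_MEMORY)
          result = PIGLIT_PASS;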

> But I've also found that this test (on NVIDIA) is sensitive to whatever
> else might be running.  In my first run I also had several VMs running
> on my system (using a fair amount of RAM and VRAM) and max-texture-size
> hung my system when it was testing a 16384 RGBA32F cube map.
> 
> I'm curious what AMD's driver does.
> 
> -Brian

Whatever you want to do here is probably fine.  I was mostly working on
it because I needed to raise our driver's limit to make an application
work, and discovered there was no way I could pass the piglit test as
written...even though there wasn't a bug in my driver.
On 02/16/2014 03:31 PM, Kenneth Graunke wrote:
> I'm not clear whether we actually want to calloc data to pass to 
> TexImage.  It seems like passing NULL ought to be sufficient, since
> the driver should still allocate storage for it (so it could be
> populated via rendering)...but maybe that could be deferred.

That's a really good suggestion.  Doing a glTexImage2D with NULL
pixels followed by a glTexSubImage2D of a single pixel in the middle
of the texture should do the trick.
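
Concretely, something like this (texel and maxSide are placeholders):

  static const GLfloat texel[4] = {0.0f, 1.0f, 0.0f, 1.0f};

  /* Define the full-size image with no client data... */
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, maxSide, maxSide, 0,
               GL_RGBA, GL_FLOAT, NULL);
  /* ...then touch one texel in the middle, which should force the
   * driver to actually materialize the storage. */
  glTexSubImage2D(GL_TEXTURE_2D, 0, maxSide / 2, maxSide / 2, 1, 1,
                  GL_RGBA, GL_FLOAT, texel);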

On 02/18/2014 03:46 PM, Ian Romanick wrote:
> On 02/16/2014 03:31 PM, Kenneth Graunke wrote:
>> I'm not clear whether we actually want to calloc data to pass to 
>> TexImage.  It seems like passing NULL ought to be sufficient, since
>> the driver should still allocate storage for it (so it could be
>> populated via rendering)...but maybe that could be deferred.
> 
> That's a really good suggestion.  Doing a glTexImage2D with NULL
> pixels followed by a glTexSubImage2D of a single pixel in the middle
> of the texture should do the trick.

Ah, the subsequent TexSubImage2D trick is pretty clever.  That way the
driver will actually create the storage...

--Ken
Finally getting back to this one...

On 02/16/2014 04:31 PM, Kenneth Graunke wrote:
> I'm not clear whether we actually want to calloc data to pass to
> TexImage.  It seems like passing NULL ought to be sufficient, since the
> driver should still allocate storage for it (so it could be populated
> via rendering)...but maybe that could be deferred.

As-is, the glTexImage() calls already pass pixels=NULL.  It's only the
glTexSubImage() calls that use the calloc'd memory.


> It seems like calloc'ing a huge amount of data just increases the
> likelihood of GL_OUT_OF_MEMORY errors....

Can I get an R-b?  I'll do another patch which changes glTexSubImage() 
to just do a 1x1 subimage to avoid the large calloc() issue.

-Brian