teximage-colors: fix bogus precision assumptions and other issues

Submitted by sroland@vmware.com on Oct. 9, 2014, midnight

Details

Message ID 1412812817-9045-1-git-send-email-sroland@vmware.com
State New


Commit Message

sroland@vmware.com Oct. 9, 2014, midnight
From: Roland Scheidegger <sroland@vmware.com>

Some formats had an "implied" precision which relied on the driver picking a
specific hw format (e.g. GL_RGB4 and GL_RGB5 both relied on the driver
picking 565). These tests will now be skipped (for the exact test).
Others didn't have any implied precision but relied on the driver picking a
format with at least 8 bits even though the internal format only implies 4.
Also, the exact tests for srgb8 and srgb8_alpha8 were failing due to using
GL_BYTE instead of GL_UNSIGNED_BYTE.
With these fixes the only tests llvmpipe still fails appear to be the exact
GL_RGB8_SNORM and GL_RGB16_SNORM ones (the 1-, 2- and 4-channel variants
work and the errors are one bit off, so this probably hits a conversion path
somewhere that uses a different signed normalization formula).
---
 tests/texturing/teximage-colors.c | 43 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 38 insertions(+), 5 deletions(-)


diff --git a/tests/texturing/teximage-colors.c b/tests/texturing/teximage-colors.c
index 2bee02f..dcbab65 100644
--- a/tests/texturing/teximage-colors.c
+++ b/tests/texturing/teximage-colors.c
@@ -60,12 +60,12 @@  struct texture_format formats[] = {
 
 	FORMAT(GL_RGB, GL_RGB, GL_NONE),
 	FORMAT(GL_R3_G3_B2, GL_RGB, GL_UNSIGNED_BYTE_3_3_2),
-	FORMAT(GL_RGB4, GL_RGB, GL_UNSIGNED_SHORT_5_6_5),
-	FORMAT(GL_RGB5, GL_RGB, GL_UNSIGNED_SHORT_5_6_5),
+	FORMAT(GL_RGB4, GL_RGB, GL_NONE),
+	FORMAT(GL_RGB5, GL_RGB, GL_NONE),
 	FORMAT(GL_RGB8, GL_RGB, GL_UNSIGNED_BYTE),
 	FORMAT(GL_RGB8_SNORM, GL_RGB, GL_BYTE),
-	FORMAT(GL_SRGB8, GL_RGB, GL_BYTE),
-	FORMAT(GL_RGB10, GL_RGB, GL_UNSIGNED_SHORT),
+	FORMAT(GL_SRGB8, GL_RGB, GL_UNSIGNED_BYTE),
+	FORMAT(GL_RGB10, GL_RGB, GL_NONE),
 	FORMAT(GL_R11F_G11F_B10F, GL_RGB, GL_NONE),
 	FORMAT(GL_RGB12, GL_RGB, GL_NONE),
 	FORMAT(GL_RGB9_E5, GL_RGB, GL_NONE),
@@ -81,7 +81,7 @@  struct texture_format formats[] = {
 	FORMAT(GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE),
 	FORMAT(GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_10_10_10_2),
 	FORMAT(GL_RGBA8_SNORM, GL_RGBA, GL_BYTE),
-	FORMAT(GL_SRGB8_ALPHA8, GL_RGBA, GL_BYTE),
+	FORMAT(GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE),
 	FORMAT(GL_RGBA12, GL_RGBA, GL_NONE),
 	FORMAT(GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT),
 	FORMAT(GL_RGBA16_SNORM, GL_RGBA, GL_SHORT),
@@ -543,6 +543,39 @@  piglit_init(int argc, char **argv)
 		tolerance[3] = 0.3;
 		break;
 	}
+
+	/* The tolerance lowering above only works for formats which have
+           explicit data types associated with them and even then it's fishy
+           for some.
+           The default sort of assumes at least 7 bits which doesn't make
+	   much sense in any case (for the specific formats with more bits).
+	   But just fix the cases which cannot pass (unless the driver encodes
+	   them with more bits). */
+	switch (format->internal_format) {
+	case GL_RGB4:
+		tolerance[0] = 0.1;
+		tolerance[1] = 0.1;
+		tolerance[2] = 0.1;
+		tolerance[3] = 0.1;
+		break;
+	case GL_RGB5:
+		tolerance[0] = 0.05;
+		tolerance[1] = 0.05;
+		tolerance[2] = 0.05;
+		break;
+	case GL_LUMINANCE4_ALPHA4:
+		tolerance[0] = 0.1;
+		tolerance[1] = 0.1;
+		tolerance[2] = 0.1;
+		tolerance[3] = 0.1;
+		break;
+        case GL_LUMINANCE6_ALPHA2: /* broken but everybody uses 8+8 bits */
+        case GL_LUMINANCE4: /* broken but presumably noone uses just 4 bits */
+	case GL_ALPHA4: /* broken but presumably noone uses just 4 bits */
+	case GL_RGBA2: /* broken (4444) but everybody uses more bits anyway */
+	default:
+		break;
+	}
 }
 
 void
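
For context (this is not part of the patch), the new tolerances line up with the quantization step of an n-bit UNORM channel, and the one-bit GL_RGB8_SNORM/GL_RGB16_SNORM mismatches mentioned in the commit message are consistent with two different byte-to-float formulas being mixed somewhere. A minimal standalone C sketch; unorm_step() is a hypothetical helper for illustration, not code from teximage-colors.c:

#include <stdio.h>

/* Smallest step between adjacent values of an n-bit UNORM channel; an
 * exact comparison can only pass if the test tolerance is at least on
 * this order.
 */
static float
unorm_step(int bits)
{
	return 1.0f / (float)((1 << bits) - 1);
}

int
main(void)
{
	/* GL_RGB4: 1/15 ~= 0.067, so the 0.1 tolerance is enough.
	 * GL_RGB5: 1/31 ~= 0.032, so 0.05 is enough (and also covers the
	 * 6-bit green of a 565 encoding, 1/63).
	 */
	printf("4-bit step: %f\n", unorm_step(4));
	printf("5-bit step: %f\n", unorm_step(5));

	/* Two byte-to-float SNORM conversion formulas that have both been
	 * used by GL implementations over the years; mixing them yields
	 * roughly one-bit differences.
	 */
	signed char c = -100;
	printf("c/127 = %f, (2c+1)/255 = %f\n",
	       c / 127.0f, (2 * c + 1) / 255.0f);
	return 0;
}

Running this prints 0.066667 and 0.032258 for the 4- and 5-bit steps, which is why 0.1 and 0.05 are safe choices in the switch above.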

Comments

On 10/08/2014 06:00 PM, sroland@vmware.com wrote:
> From: Roland Scheidegger <sroland@vmware.com>
>
> Some formats had an "implied" precision which relied on the driver picking a
> specific hw format (e.g. GL_RGB4 and GL_RGB5 both relied on the driver
> picking 565). These tests will now be skipped (for the exact test).
> Others didn't have any implied precision but relied on the driver picking a
> format with at least 8 bits even though the internal format only implies 4.
> Also, the exact tests for srgb8 and srgb8_alpha8 were failing due to using
> GL_BYTE instead of GL_UNSIGNED_BYTE.
> With these fixes the only tests llvmpipe still fails appear to be the exact
> GL_RGB8_SNORM and GL_RGB16_SNORM ones (the 1-, 2- and 4-channel variants
> work and the errors are one bit off, so this probably hits a conversion path
> somewhere that uses a different signed normalization formula).
> ---
>   tests/texturing/teximage-colors.c | 43 ++++++++++++++++++++++++++++++++++-----
>   1 file changed, 38 insertions(+), 5 deletions(-)
>
> diff --git a/tests/texturing/teximage-colors.c b/tests/texturing/teximage-colors.c
> index 2bee02f..dcbab65 100644
> --- a/tests/texturing/teximage-colors.c
> +++ b/tests/texturing/teximage-colors.c
> @@ -60,12 +60,12 @@ struct texture_format formats[] = {
>
>   	FORMAT(GL_RGB, GL_RGB, GL_NONE),
>   	FORMAT(GL_R3_G3_B2, GL_RGB, GL_UNSIGNED_BYTE_3_3_2),
> -	FORMAT(GL_RGB4, GL_RGB, GL_UNSIGNED_SHORT_5_6_5),
> -	FORMAT(GL_RGB5, GL_RGB, GL_UNSIGNED_SHORT_5_6_5),
> +	FORMAT(GL_RGB4, GL_RGB, GL_NONE),
> +	FORMAT(GL_RGB5, GL_RGB, GL_NONE),
>   	FORMAT(GL_RGB8, GL_RGB, GL_UNSIGNED_BYTE),
>   	FORMAT(GL_RGB8_SNORM, GL_RGB, GL_BYTE),
> -	FORMAT(GL_SRGB8, GL_RGB, GL_BYTE),
> -	FORMAT(GL_RGB10, GL_RGB, GL_UNSIGNED_SHORT),
> +	FORMAT(GL_SRGB8, GL_RGB, GL_UNSIGNED_BYTE),
> +	FORMAT(GL_RGB10, GL_RGB, GL_NONE),
>   	FORMAT(GL_R11F_G11F_B10F, GL_RGB, GL_NONE),
>   	FORMAT(GL_RGB12, GL_RGB, GL_NONE),
>   	FORMAT(GL_RGB9_E5, GL_RGB, GL_NONE),
> @@ -81,7 +81,7 @@ struct texture_format formats[] = {
>   	FORMAT(GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE),
>   	FORMAT(GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_10_10_10_2),
>   	FORMAT(GL_RGBA8_SNORM, GL_RGBA, GL_BYTE),
> -	FORMAT(GL_SRGB8_ALPHA8, GL_RGBA, GL_BYTE),
> +	FORMAT(GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE),
>   	FORMAT(GL_RGBA12, GL_RGBA, GL_NONE),
>   	FORMAT(GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT),
>   	FORMAT(GL_RGBA16_SNORM, GL_RGBA, GL_SHORT),
> @@ -543,6 +543,39 @@ piglit_init(int argc, char **argv)
>   		tolerance[3] = 0.3;
>   		break;
>   	}
> +
> +	/* The tolerance lowering above only works for formats which have
> +           explicit data types associated with them and even then it's fishy
> +           for some.
> +           The default sort of assumes at least 7 bits which doesn't make
> +	   much sense in any case (for the specific formats with more bits).
> +	   But just fix the cases which cannot pass (unless the driver encodes
> +	   them with more bits). */

The comment is indented with a mix of spaces and tabs.  For block 
comments, we usually prefix each line with '*'.
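
For illustration only (not a revised patch), that comment written in the style described here, tab-indented with a '*' prefix on each line, would look roughly like:

	/* The tolerance lowering above only works for formats which have
	 * explicit data types associated with them, and even then it's
	 * fishy for some.  The default sort of assumes at least 7 bits,
	 * which doesn't make much sense for the specific formats with
	 * more bits.  But just fix the cases which cannot pass (unless
	 * the driver encodes them with more bits).
	 */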


> +	switch (format->internal_format) {
> +	case GL_RGB4:
> +		tolerance[0] = 0.1;
> +		tolerance[1] = 0.1;
> +		tolerance[2] = 0.1;
> +		tolerance[3] = 0.1;
> +		break;
> +	case GL_RGB5:
> +		tolerance[0] = 0.05;
> +		tolerance[1] = 0.05;
> +		tolerance[2] = 0.05;
> +		break;
> +	case GL_LUMINANCE4_ALPHA4:
> +		tolerance[0] = 0.1;
> +		tolerance[1] = 0.1;
> +		tolerance[2] = 0.1;
> +		tolerance[3] = 0.1;
> +		break;
> +        case GL_LUMINANCE6_ALPHA2: /* broken but everybody uses 8+8 bits */
> +        case GL_LUMINANCE4: /* broken but presumably noone uses just 4 bits */
> +	case GL_ALPHA4: /* broken but presumably noone uses just 4 bits */
> +	case GL_RGBA2: /* broken (4444) but everybody uses more bits anyway */

Need some tab indents there too.


> +	default:
> +		break;
> +	}
>   }
>
>   void
>

Looks OK otherwise.
Reviewed-by: Brian Paul <brianp@vmware.com>

With the formatting fixed:
Reviewed-by: Jason Ekstrand <jason.ekstrand@intel.com>
