
[4/8] softfloat: Add float_cmask and constants

Message ID 20200924012453.659757-5-richard.henderson@linaro.org
State Superseded
Series softfloat: Implement float128_muladd

Commit Message

Richard Henderson Sept. 24, 2020, 1:24 a.m. UTC
Testing more than one class at a time is better done with masks.
This reduces the static branch count.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 fpu/softfloat.c | 31 ++++++++++++++++++++++++-------
 1 file changed, 24 insertions(+), 7 deletions(-)
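
The technique described in the commit message is easy to demonstrate in isolation: giving each FloatClass value a one-bit mask lets tests such as "is one of a/b an infinity and the other a zero?" or "is any operand a NaN?" each collapse into a single bitwise check instead of a chain of equality comparisons. The snippet below is a minimal standalone sketch of that idea, not the QEMU code itself; the trimmed-down FloatClass enum and the main() driver are illustrative assumptions.

/*
 * Standalone illustration of the float_cmask technique: the enum here
 * is an illustrative stand-in for QEMU's FloatClass, not the real one.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum {
    float_class_zero,
    float_class_normal,
    float_class_inf,
    float_class_qnan,
    float_class_snan,
} FloatClass;

#define float_cmask(bit)  (1u << (bit))

enum {
    float_cmask_zero    = float_cmask(float_class_zero),
    float_cmask_inf     = float_cmask(float_class_inf),
    float_cmask_qnan    = float_cmask(float_class_qnan),
    float_cmask_snan    = float_cmask(float_class_snan),

    float_cmask_infzero = float_cmask_zero | float_cmask_inf,
    float_cmask_anynan  = float_cmask_qnan | float_cmask_snan,
};

int main(void)
{
    /* Example operands for a fused multiply-add: inf * 0 + qnan. */
    FloatClass a = float_class_inf;
    FloatClass b = float_class_zero;
    FloatClass c = float_class_qnan;

    /* One OR per operand replaces a chain of per-class equality tests. */
    unsigned ab_mask  = float_cmask(a) | float_cmask(b);
    unsigned abc_mask = float_cmask(c) | ab_mask;

    /* "One of a/b is inf and the other is zero" becomes one compare... */
    bool inf_zero = ab_mask == float_cmask_infzero;

    /* ...and "any of a, b, c is a NaN" becomes one AND. */
    bool any_nan = abc_mask & float_cmask_anynan;

    printf("inf_zero=%d any_nan=%d\n", inf_zero, any_nan);
    return 0;
}

Once the masks are computed, checking several classes costs the same as checking one, which is how the patch trims branches out of muladd_floats().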

Comments

David Hildenbrand Sept. 24, 2020, 7:40 a.m. UTC | #1
On 24.09.20 03:24, Richard Henderson wrote:
> Testing more than one class at a time is better done with masks.
> This reduces the static branch count.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  fpu/softfloat.c | 31 ++++++++++++++++++++++++-------
>  1 file changed, 24 insertions(+), 7 deletions(-)

Reviewed-by: David Hildenbrand <david@redhat.com>


-- 
Thanks,

David / dhildenb

Patch

diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 9db55d2b11..3e625c47cd 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -469,6 +469,20 @@ typedef enum __attribute__ ((__packed__)) {
     float_class_snan,
 } FloatClass;
 
+#define float_cmask(bit)  (1u << (bit))
+
+enum {
+    float_cmask_zero    = float_cmask(float_class_zero),
+    float_cmask_normal  = float_cmask(float_class_normal),
+    float_cmask_inf     = float_cmask(float_class_inf),
+    float_cmask_qnan    = float_cmask(float_class_qnan),
+    float_cmask_snan    = float_cmask(float_class_snan),
+
+    float_cmask_infzero = float_cmask_zero | float_cmask_inf,
+    float_cmask_anynan  = float_cmask_qnan | float_cmask_snan,
+};
+
+
 /* Simple helpers for checking if, or what kind of, NaN we have */
 static inline __attribute__((unused)) bool is_nan(FloatClass c)
 {
@@ -1335,24 +1349,27 @@ bfloat16 QEMU_FLATTEN bfloat16_mul(bfloat16 a, bfloat16 b, float_status *status)
 static FloatParts muladd_floats(FloatParts a, FloatParts b, FloatParts c,
                                 int flags, float_status *s)
 {
-    bool inf_zero = ((1 << a.cls) | (1 << b.cls)) ==
-                    ((1 << float_class_inf) | (1 << float_class_zero));
-    bool p_sign;
+    bool inf_zero, p_sign;
     bool sign_flip = flags & float_muladd_negate_result;
     FloatClass p_class;
     uint64_t hi, lo;
     int p_exp;
+    int ab_mask, abc_mask;
+
+    ab_mask = float_cmask(a.cls) | float_cmask(b.cls);
+    abc_mask = float_cmask(c.cls) | ab_mask;
+    inf_zero = ab_mask == float_cmask_infzero;
 
     /* It is implementation-defined whether the cases of (0,inf,qnan)
      * and (inf,0,qnan) raise InvalidOperation or not (and what QNaN
      * they return if they do), so we have to hand this information
      * off to the target-specific pick-a-NaN routine.
      */
-    if (is_nan(a.cls) || is_nan(b.cls) || is_nan(c.cls)) {
+    if (unlikely(abc_mask & float_cmask_anynan)) {
         return pick_nan_muladd(a, b, c, inf_zero, s);
     }
 
-    if (inf_zero) {
+    if (unlikely(inf_zero)) {
         s->float_exception_flags |= float_flag_invalid;
         return parts_default_nan(s);
     }
@@ -1367,9 +1384,9 @@ static FloatParts muladd_floats(FloatParts a, FloatParts b, FloatParts c,
         p_sign ^= 1;
     }
 
-    if (a.cls == float_class_inf || b.cls == float_class_inf) {
+    if (ab_mask & float_cmask_inf) {
         p_class = float_class_inf;
-    } else if (a.cls == float_class_zero || b.cls == float_class_zero) {
+    } else if (ab_mask & float_cmask_zero) {
         p_class = float_class_zero;
     } else {
         p_class = float_class_normal;