From: Emilio G. Cota
Subject: [Qemu-devel] [PATCH v2 09/14] hardfloat: support float32/64 multiplication
Date: Tue, 27 Mar 2018 01:33:55 -0400

Performance results for fp-bench run under aarch64-linux-user
on an Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz host:
- before:
mul-single: 88.37 MFlops
mul-double: 85.55 MFlops
- after:
mul-single: 115.06 MFlops
mul-double: 124.67 MFlops
- with both single and double precision using float32/64_is_normal(), etc.:
mul-single: 113.49 MFlops
mul-double: 113.46 MFlops
- with both single and double precision using fpclassify(), etc.:
mul-single: 105.70 MFlops
mul-double: 127.69 MFlops
Signed-off-by: Emilio G. Cota <address@hidden>
---
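Note (not part of the patch): the fast path defers to the host FPU only when
both inputs are zero or normal, the inexact flag is already raised, and the
rounding mode is round-to-nearest-even; requiring inexact to be set up front
means the fast path never has to decide whether the host multiply itself was
exact. A minimal standalone sketch of that guard, using the helpers this
series relies on (the name can_use_host_fpu32 is made up for this note):

    #include "fpu/softfloat.h"

    /*
     * Hypothetical helper, for illustration only: it mirrors the condition
     * that the GEN_FPU_MUL fast path below checks before multiplying on
     * the host FPU.
     */
    static inline bool can_use_host_fpu32(float32 a, float32 b,
                                          const float_status *s)
    {
        return (float32_is_normal(a) || float32_is_zero(a)) &&
               (float32_is_normal(b) || float32_is_zero(b)) &&
               (s->float_exception_flags & float_flag_inexact) &&
               s->float_rounding_mode == float_round_nearest_even;
    }

Even when this guard passes, a host result whose magnitude is at or below
FLT_MIN still falls back to the soft implementation so that underflow and
subnormal results are handled exactly.
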
fpu/softfloat.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 73 insertions(+), 4 deletions(-)
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index e0ab0ca..9739a86 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -1044,8 +1044,8 @@ float16 __attribute__((flatten)) float16_mul(float16 a, float16 b,
     return float16_round_pack_canonical(pr, status);
 }
 
-float32 __attribute__((flatten)) float32_mul(float32 a, float32 b,
-                                             float_status *status)
+static float32 __attribute__((flatten, noinline))
+soft_float32_mul(float32 a, float32 b, float_status *status)
 {
     FloatParts pa = float32_unpack_canonical(a, status);
     FloatParts pb = float32_unpack_canonical(b, status);
@@ -1054,8 +1054,8 @@ float32 __attribute__((flatten)) float32_mul(float32 a, float32 b,
     return float32_round_pack_canonical(pr, status);
 }
 
-float64 __attribute__((flatten)) float64_mul(float64 a, float64 b,
-                                             float_status *status)
+static float64 __attribute__((flatten, noinline))
+soft_float64_mul(float64 a, float64 b, float_status *status)
 {
     FloatParts pa = float64_unpack_canonical(a, status);
     FloatParts pb = float64_unpack_canonical(b, status);
@@ -1064,6 +1064,75 @@ float64 __attribute__((flatten)) float64_mul(float64 a, float64 b,
     return float64_round_pack_canonical(pr, status);
 }
 
+#define GEN_FPU_MUL(name, soft_t, host_t, host_abs_func, min_normal) \
+    soft_t name(soft_t a, soft_t b, float_status *s) \
+    { \
+        soft_t ## _input_flush2(&a, &b, s); \
+        if (likely((soft_t ## _is_normal(a) || soft_t ## _is_zero(a)) && \
+                   (soft_t ## _is_normal(b) || soft_t ## _is_zero(b)) && \
+                   s->float_exception_flags & float_flag_inexact && \
+                   s->float_rounding_mode == float_round_nearest_even)) { \
+            if (soft_t ## _is_zero(a) || soft_t ## _is_zero(b)) { \
+                bool signbit = soft_t ## _is_neg(a) ^ soft_t ## _is_neg(b); \
+ \
+                return soft_t ## _set_sign(soft_t ## _zero, signbit); \
+            } else { \
+                host_t ha = soft_t ## _to_ ## host_t(a); \
+                host_t hb = soft_t ## _to_ ## host_t(b); \
+                host_t hr = ha * hb; \
+                soft_t r = host_t ## _to_ ## soft_t(hr); \
+ \
+                if (unlikely(soft_t ## _is_infinity(r))) { \
+                    s->float_exception_flags |= float_flag_overflow; \
+                } else if (unlikely(host_abs_func(hr) <= min_normal)) { \
+                    goto soft; \
+                } \
+                return r; \
+            } \
+        } \
+    soft: \
+        return soft_ ## soft_t ## _mul(a, b, s); \
+    }
+
+GEN_FPU_MUL(float32_mul, float32, float, fabsf, FLT_MIN)
+#undef GEN_FPU_MUL
+
+#define GEN_FPU_MUL(name, soft_t, host_t, host_abs_func, min_normal) \
+    soft_t name(soft_t a, soft_t b, float_status *s) \
+    { \
+        host_t ha, hb; \
+ \
+        soft_t ## _input_flush2(&a, &b, s); \
+        ha = soft_t ## _to_ ## host_t(a); \
+        hb = soft_t ## _to_ ## host_t(b); \
+        if (likely((fpclassify(ha) == FP_NORMAL || \
+                    fpclassify(ha) == FP_ZERO) && \
+                   (fpclassify(hb) == FP_NORMAL || \
+                    fpclassify(hb) == FP_ZERO) && \
+                   s->float_exception_flags & float_flag_inexact && \
+                   s->float_rounding_mode == float_round_nearest_even)) { \
+            if (soft_t ## _is_zero(a) || soft_t ## _is_zero(b)) { \
+                bool signbit = soft_t ## _is_neg(a) ^ soft_t ## _is_neg(b); \
+ \
+                return soft_t ## _set_sign(soft_t ## _zero, signbit); \
+            } else { \
+                host_t hr = ha * hb; \
+ \
+                if (unlikely(isinf(hr))) { \
+                    s->float_exception_flags |= float_flag_overflow; \
+                } else if (unlikely(host_abs_func(hr) <= min_normal)) { \
+                    goto soft; \
+                } \
+                return host_t ## _to_ ## soft_t(hr); \
+            } \
+        } \
+    soft: \
+        return soft_ ## soft_t ## _mul(a, b, s); \
+    }
+
+GEN_FPU_MUL(float64_mul, float64, double, fabs, DBL_MIN)
+#undef GEN_FPU_MUL
+
 /*
  * Returns the result of multiplying the floating-point values `a' and
  * `b' then adding 'c', with no intermediate rounding step after the
--
2.7.4