author    bunnei  2016-05-26 18:28:00 -0400
committer bunnei  2016-05-26 18:28:00 -0400
commit    859c36c269a018918fc56763eb4f13916321c52a (patch)
tree      3539bf2bd799c3d2b567f9abce811ded08c259a3 /src
parent    Merge pull request #1846 from JayFoxRox/missing-dirty-lighting (diff)
parent    Remove `exceptions` parameter from `normaliseround` VFP functions (diff)
download  yuzu-859c36c269a018918fc56763eb4f13916321c52a.tar.gz
          yuzu-859c36c269a018918fc56763eb4f13916321c52a.tar.xz
          yuzu-859c36c269a018918fc56763eb4f13916321c52a.zip
Merge pull request #1810 from JayFoxRox/fix-float-exceptions
Fix float exceptions
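
The change applies one pattern across the VFP helpers: instead of mutating FPSCR through a `u32*` parameter (or threading a pre-accumulated `exceptions` argument into `normaliseround`), each helper now takes `fpscr` by value and returns the exception flags it raised, and every caller accumulates those flags with `|=` into a single mask. A minimal sketch of that pattern (the constants and function names below are illustrative stand-ins, not the emulator's actual code):

#include <cstdint>

using u32 = std::uint32_t;

// Stand-in flag values for illustration; the real FPSCR_* masks live in
// the emulator's VFP headers.
constexpr u32 FPSCR_IDC = 1u << 7; // input-denormal cumulative flag
constexpr u32 FPSCR_IOC = 1u << 0; // invalid-operation cumulative flag

// New style, as in this commit: the helper reports the exceptions it
// raised through its return value and leaves FPSCR untouched.
u32 unpack_step(bool is_denormal, u32 fpscr)
{
    u32 exceptions = 0;
    if (is_denormal)
        exceptions |= FPSCR_IDC; // report the flag, don't mutate fpscr
    return exceptions;
}

u32 arithmetic_step(bool invalid)
{
    return invalid ? FPSCR_IOC : 0;
}

// Callers OR every step's flags together and hand back one mask, which
// the dispatcher can then apply to FPSCR exactly once.
u32 emulate_op(bool denormal_in, bool invalid, u32 fpscr)
{
    u32 exceptions = 0;
    exceptions |= unpack_step(denormal_in, fpscr);
    exceptions |= arithmetic_step(invalid);
    return exceptions;
}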
Diffstat (limited to 'src')
-rw-r--r--  src/core/arm/skyeye_common/vfp/vfp_helper.h    20
-rw-r--r--  src/core/arm/skyeye_common/vfp/vfpdouble.cpp  101
-rw-r--r--  src/core/arm/skyeye_common/vfp/vfpsingle.cpp  100
3 files changed, 130 insertions, 91 deletions
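
Seen from the caller side, every arithmetic routine in the diff below converges on the same accumulation shape; for example, vfp_single_fmul after this change reduces to the following (excerpted from the diff, with the denormal-normalisation steps elided):

u32 exceptions = 0;
exceptions |= vfp_single_unpack(&vsn, n, fpscr);
exceptions |= vfp_single_unpack(&vsm, m, fpscr);
exceptions |= vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fmul");
return exceptions;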
diff --git a/src/core/arm/skyeye_common/vfp/vfp_helper.h b/src/core/arm/skyeye_common/vfp/vfp_helper.h
index 210972917..68714800c 100644
--- a/src/core/arm/skyeye_common/vfp/vfp_helper.h
+++ b/src/core/arm/skyeye_common/vfp/vfp_helper.h
@@ -271,8 +271,9 @@ inline int vfp_single_type(const vfp_single* s)
 // Unpack a single-precision float. Note that this returns the magnitude
 // of the single-precision float mantissa with the 1. if necessary,
 // aligned to bit 30.
-inline void vfp_single_unpack(vfp_single* s, s32 val, u32* fpscr)
+inline u32 vfp_single_unpack(vfp_single* s, s32 val, u32 fpscr)
 {
+    u32 exceptions = 0;
     s->sign = vfp_single_packed_sign(val) >> 16,
     s->exponent = vfp_single_packed_exponent(val);
 
@@ -283,12 +284,13 @@ inline void vfp_single_unpack(vfp_single* s, s32 val, u32* fpscr)
 
     // If flush-to-zero mode is enabled, turn the denormal into zero.
     // On a VFPv2 architecture, the sign of the zero is always positive.
-    if ((*fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_single_type(s) & VFP_DENORMAL) != 0) {
+    if ((fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_single_type(s) & VFP_DENORMAL) != 0) {
         s->sign = 0;
         s->exponent = 0;
         s->significand = 0;
-        *fpscr |= FPSCR_IDC;
+        exceptions |= FPSCR_IDC;
     }
+    return exceptions;
 }
 
 // Re-pack a single-precision float. This assumes that the float is
@@ -302,7 +304,7 @@ inline s32 vfp_single_pack(const vfp_single* s)
 }
 
 
-u32 vfp_single_normaliseround(ARMul_State* state, int sd, vfp_single* vs, u32 fpscr, u32 exceptions, const char* func);
+u32 vfp_single_normaliseround(ARMul_State* state, int sd, vfp_single* vs, u32 fpscr, const char* func);
 
 // Double-precision
 struct vfp_double {
@@ -357,8 +359,9 @@ inline int vfp_double_type(const vfp_double* s)
 // Unpack a double-precision float. Note that this returns the magnitude
 // of the double-precision float mantissa with the 1. if necessary,
 // aligned to bit 62.
-inline void vfp_double_unpack(vfp_double* s, s64 val, u32* fpscr)
+inline u32 vfp_double_unpack(vfp_double* s, s64 val, u32 fpscr)
 {
+    u32 exceptions = 0;
     s->sign = vfp_double_packed_sign(val) >> 48;
     s->exponent = vfp_double_packed_exponent(val);
 
@@ -369,12 +372,13 @@ inline void vfp_double_unpack(vfp_double* s, s64 val, u32* fpscr)
 
     // If flush-to-zero mode is enabled, turn the denormal into zero.
     // On a VFPv2 architecture, the sign of the zero is always positive.
-    if ((*fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_double_type(s) & VFP_DENORMAL) != 0) {
+    if ((fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_double_type(s) & VFP_DENORMAL) != 0) {
         s->sign = 0;
         s->exponent = 0;
         s->significand = 0;
-        *fpscr |= FPSCR_IDC;
+        exceptions |= FPSCR_IDC;
     }
+    return exceptions;
 }
 
 // Re-pack a double-precision float. This assumes that the float is
@@ -447,4 +451,4 @@ inline u32 fls(u32 x)
 
 u32 vfp_double_multiply(vfp_double* vdd, vfp_double* vdn, vfp_double* vdm, u32 fpscr);
 u32 vfp_double_add(vfp_double* vdd, vfp_double* vdn, vfp_double *vdm, u32 fpscr);
-u32 vfp_double_normaliseround(ARMul_State* state, int dd, vfp_double* vd, u32 fpscr, u32 exceptions, const char* func);
+u32 vfp_double_normaliseround(ARMul_State* state, int dd, vfp_double* vd, u32 fpscr, const char* func);
diff --git a/src/core/arm/skyeye_common/vfp/vfpdouble.cpp b/src/core/arm/skyeye_common/vfp/vfpdouble.cpp
index 45914d479..580e60c85 100644
--- a/src/core/arm/skyeye_common/vfp/vfpdouble.cpp
+++ b/src/core/arm/skyeye_common/vfp/vfpdouble.cpp
@@ -85,11 +85,12 @@ static void vfp_double_normalise_denormal(struct vfp_double *vd)
     vfp_double_dump("normalise_denormal: out", vd);
 }
 
-u32 vfp_double_normaliseround(ARMul_State* state, int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func)
+u32 vfp_double_normaliseround(ARMul_State* state, int dd, struct vfp_double *vd, u32 fpscr, const char *func)
 {
     u64 significand, incr;
     int exponent, shift, underflow;
     u32 rmode;
+    u32 exceptions = 0;
 
     vfp_double_dump("pack: in", vd);
 
@@ -291,8 +292,9 @@ static u32 vfp_double_fsqrt(ARMul_State* state, int dd, int unused, int dm, u32
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
     vfp_double vdm, vdd, *vdp;
     int ret, tm;
+    u32 exceptions = 0;
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
 
     tm = vfp_double_type(&vdm);
     if (tm & (VFP_NAN|VFP_INFINITY)) {
@@ -369,7 +371,8 @@ sqrt_invalid:
     }
     vdd.significand = vfp_shiftright64jamming(vdd.significand, 1);
 
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, 0, "fsqrt");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fsqrt");
+    return exceptions;
 }
 
 /*
@@ -475,7 +478,7 @@ static u32 vfp_double_fcvts(ARMul_State* state, int sd, int unused, int dm, u32
     u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
 
     tm = vfp_double_type(&vdm);
 
@@ -504,7 +507,8 @@ static u32 vfp_double_fcvts(ARMul_State* state, int sd, int unused, int dm, u32
     else
         vsd.exponent = vdm.exponent - (1023 - 127);
 
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fcvts");
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fcvts");
+    return exceptions;
 
 pack_nan:
     vfp_put_float(state, vfp_single_pack(&vsd), sd);
@@ -514,6 +518,7 @@ pack_nan:
 static u32 vfp_double_fuito(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
 {
     struct vfp_double vdm;
+    u32 exceptions = 0;
     u32 m = vfp_get_float(state, dm);
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
@@ -521,12 +526,14 @@ static u32 vfp_double_fuito(ARMul_State* state, int dd, int unused, int dm, u32
     vdm.exponent = 1023 + 63 - 1;
     vdm.significand = (u64)m;
 
-    return vfp_double_normaliseround(state, dd, &vdm, fpscr, 0, "fuito");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdm, fpscr, "fuito");
+    return exceptions;
 }
 
 static u32 vfp_double_fsito(ARMul_State* state, int dd, int unused, int dm, u32 fpscr)
 {
     struct vfp_double vdm;
+    u32 exceptions = 0;
     u32 m = vfp_get_float(state, dm);
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
@@ -534,7 +541,8 @@ static u32 vfp_double_fsito(ARMul_State* state, int dd, int unused, int dm, u32
     vdm.exponent = 1023 + 63 - 1;
     vdm.significand = vdm.sign ? (~m + 1) : m;
 
-    return vfp_double_normaliseround(state, dd, &vdm, fpscr, 0, "fsito");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdm, fpscr, "fsito");
+    return exceptions;
 }
 
 static u32 vfp_double_ftoui(ARMul_State* state, int sd, int unused, int dm, u32 fpscr)
@@ -545,7 +553,7 @@ static u32 vfp_double_ftoui(ARMul_State* state, int sd, int unused, int dm, u32
     int tm;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
 
     /*
      * Do we have a denormalised number?
@@ -626,7 +634,7 @@ static u32 vfp_double_ftosi(ARMul_State* state, int sd, int unused, int dm, u32
     int tm;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     vfp_double_dump("VDM", &vdm);
 
     /*
@@ -892,21 +900,21 @@ static u32
 vfp_double_multiply_accumulate(ARMul_State* state, int dd, int dn, int dm, u32 fpscr, u32 negate, const char *func)
 {
     struct vfp_double vdd, vdp, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
-    exceptions = vfp_double_multiply(&vdp, &vdn, &vdm, fpscr);
+    exceptions |= vfp_double_multiply(&vdp, &vdn, &vdm, fpscr);
     if (negate & NEG_MULTIPLY)
         vdp.sign = vfp_sign_negate(vdp.sign);
 
-    vfp_double_unpack(&vdn, vfp_get_double(state, dd), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dd), fpscr);
     if (vdn.exponent == 0 && vdn.significand != 0)
         vfp_double_normalise_denormal(&vdn);
 
@@ -915,7 +923,8 @@ vfp_double_multiply_accumulate(ARMul_State* state, int dd, int dn, int dm, u32 f
 
     exceptions |= vfp_double_add(&vdd, &vdn, &vdp, fpscr);
 
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, func);
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, func);
+    return exceptions;
 }
 
 /*
@@ -964,19 +973,21 @@ static u32 vfp_double_fnmsc(ARMul_State* state, int dd, int dn, int dm, u32 fpsc
 static u32 vfp_double_fmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
 {
     struct vfp_double vdd, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
-    exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fmul");
+    exceptions |= vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
+
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fmul");
+    return exceptions;
 }
 
 /*
@@ -985,21 +996,22 @@ static u32 vfp_double_fmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
 static u32 vfp_double_fnmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
 {
     struct vfp_double vdd, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
-    exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
+    exceptions |= vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
     vdd.sign = vfp_sign_negate(vdd.sign);
 
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fnmul");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fnmul");
+    return exceptions;
 }
 
 /*
@@ -1008,20 +1020,21 @@ static u32 vfp_double_fnmul(ARMul_State* state, int dd, int dn, int dm, u32 fpsc
 static u32 vfp_double_fadd(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
 {
     struct vfp_double vdd, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
-    exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
+    exceptions |= vfp_double_add(&vdd, &vdn, &vdm, fpscr);
 
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fadd");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fadd");
+    return exceptions;
 }
 
 /*
@@ -1030,14 +1043,14 @@ static u32 vfp_double_fadd(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
 static u32 vfp_double_fsub(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
 {
     struct vfp_double vdd, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
@@ -1046,9 +1059,10 @@ static u32 vfp_double_fsub(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
      */
     vdm.sign = vfp_sign_negate(vdm.sign);
 
-    exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
+    exceptions |= vfp_double_add(&vdd, &vdn, &vdm, fpscr);
 
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fsub");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fsub");
+    return exceptions;
 }
 
 /*
@@ -1061,8 +1075,8 @@ static u32 vfp_double_fdiv(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
     int tm, tn;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
 
     vdd.sign = vdn.sign ^ vdm.sign;
 
@@ -1131,16 +1145,18 @@ static u32 vfp_double_fdiv(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
         }
         vdd.significand |= (reml != 0);
     }
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, 0, "fdiv");
+
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fdiv");
+    return exceptions;
 
 vdn_nan:
-    exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
+    exceptions |= vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
 pack:
     vfp_put_double(state, vfp_double_pack(&vdd), dd);
     return exceptions;
 
 vdm_nan:
-    exceptions = vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr);
+    exceptions |= vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr);
     goto pack;
 
 zero:
@@ -1149,7 +1165,7 @@ zero:
     goto pack;
 
 divzero:
-    exceptions = FPSCR_DZC;
+    exceptions |= FPSCR_DZC;
 infinity:
     vdd.exponent = 2047;
     vdd.significand = 0;
@@ -1157,7 +1173,8 @@ infinity:
 
 invalid:
     vfp_put_double(state, vfp_double_pack(&vfp_double_default_qnan), dd);
-    return FPSCR_IOC;
+    exceptions |= FPSCR_IOC;
+    return exceptions;
 }
 
 static struct op fops[] = {
diff --git a/src/core/arm/skyeye_common/vfp/vfpsingle.cpp b/src/core/arm/skyeye_common/vfp/vfpsingle.cpp
index e47ad2760..23e0cdf26 100644
--- a/src/core/arm/skyeye_common/vfp/vfpsingle.cpp
+++ b/src/core/arm/skyeye_common/vfp/vfpsingle.cpp
@@ -89,10 +89,11 @@ static void vfp_single_normalise_denormal(struct vfp_single *vs)
 }
 
 
-u32 vfp_single_normaliseround(ARMul_State* state, int sd, struct vfp_single *vs, u32 fpscr, u32 exceptions, const char *func)
+u32 vfp_single_normaliseround(ARMul_State* state, int sd, struct vfp_single *vs, u32 fpscr, const char *func)
 {
     u32 significand, incr, rmode;
     int exponent, shift, underflow;
+    u32 exceptions = 0;
 
     vfp_single_dump("pack: in", vs);
 
@@ -334,8 +335,9 @@ static u32 vfp_single_fsqrt(ARMul_State* state, int sd, int unused, s32 m, u32 f
 {
     struct vfp_single vsm, vsd, *vsp;
     int ret, tm;
+    u32 exceptions = 0;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     tm = vfp_single_type(&vsm);
     if (tm & (VFP_NAN|VFP_INFINITY)) {
         vsp = &vsd;
@@ -408,7 +410,8 @@ sqrt_invalid:
     }
     vsd.significand = vfp_shiftright32jamming(vsd.significand, 1);
 
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fsqrt");
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fsqrt");
+    return exceptions;
 }
 
 /*
@@ -503,7 +506,7 @@ static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 f
     int tm;
     u32 exceptions = 0;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
 
     tm = vfp_single_type(&vsm);
 
@@ -511,7 +514,7 @@ static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 f
      * If we have a signalling NaN, signal invalid operation.
      */
     if (tm == VFP_SNAN)
-        exceptions = FPSCR_IOC;
+        exceptions |= FPSCR_IOC;
 
     if (tm & VFP_DENORMAL)
         vfp_single_normalise_denormal(&vsm);
@@ -532,7 +535,8 @@ static u32 vfp_single_fcvtd(ARMul_State* state, int dd, int unused, s32 m, u32 f
     else
         vdd.exponent = vsm.exponent + (1023 - 127);
 
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fcvtd");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, "fcvtd");
+    return exceptions;
 
 pack_nan:
     vfp_put_double(state, vfp_double_pack(&vdd), dd);
@@ -542,23 +546,27 @@ pack_nan:
 static u32 vfp_single_fuito(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
 {
     struct vfp_single vs;
+    u32 exceptions = 0;
 
     vs.sign = 0;
     vs.exponent = 127 + 31 - 1;
     vs.significand = (u32)m;
 
-    return vfp_single_normaliseround(state, sd, &vs, fpscr, 0, "fuito");
+    exceptions |= vfp_single_normaliseround(state, sd, &vs, fpscr, "fuito");
+    return exceptions;
 }
 
 static u32 vfp_single_fsito(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
 {
     struct vfp_single vs;
+    u32 exceptions = 0;
 
     vs.sign = (m & 0x80000000) >> 16;
     vs.exponent = 127 + 31 - 1;
     vs.significand = vs.sign ? -m : m;
 
-    return vfp_single_normaliseround(state, sd, &vs, fpscr, 0, "fsito");
+    exceptions |= vfp_single_normaliseround(state, sd, &vs, fpscr, "fsito");
+    return exceptions;
 }
 
 static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 fpscr)
@@ -568,7 +576,7 @@ static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 f
     int rmode = fpscr & FPSCR_RMODE_MASK;
     int tm;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     vfp_single_dump("VSM", &vsm);
 
     /*
@@ -583,7 +591,7 @@ static u32 vfp_single_ftoui(ARMul_State* state, int sd, int unused, s32 m, u32 f
 
     if (vsm.exponent >= 127 + 32) {
         d = vsm.sign ? 0 : 0xffffffff;
-        exceptions = FPSCR_IOC;
+        exceptions |= FPSCR_IOC;
     } else if (vsm.exponent >= 127) {
         int shift = 127 + 31 - vsm.exponent;
         u32 rem, incr = 0;
@@ -648,7 +656,7 @@ static u32 vfp_single_ftosi(ARMul_State* state, int sd, int unused, s32 m, u32 f
     int rmode = fpscr & FPSCR_RMODE_MASK;
     int tm;
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     vfp_single_dump("VSM", &vsm);
 
     /*
@@ -774,7 +782,7 @@ vfp_single_fadd_nonnumber(struct vfp_single *vsd, struct vfp_single *vsn,
         /*
          * different signs -> invalid
          */
-        exceptions = FPSCR_IOC;
+        exceptions |= FPSCR_IOC;
         vsp = &vfp_single_default_qnan;
     } else {
         /*
@@ -921,27 +929,27 @@ static u32
 vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr, u32 negate, const char *func)
 {
     vfp_single vsd, vsp, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 v;
 
     v = vfp_get_float(state, sn);
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, v);
-    vfp_single_unpack(&vsn, v, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, v, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
+    exceptions |= vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
 
     if (negate & NEG_MULTIPLY)
         vsp.sign = vfp_sign_negate(vsp.sign);
 
     v = vfp_get_float(state, sd);
     LOG_TRACE(Core_ARM11, "s%u = %08x", sd, v);
-    vfp_single_unpack(&vsn, v, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, v, fpscr);
     if (vsn.exponent == 0 && vsn.significand != 0)
         vfp_single_normalise_denormal(&vsn);
 
@@ -950,7 +958,8 @@ vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fp
 
     exceptions |= vfp_single_add(&vsd, &vsn, &vsp, fpscr);
 
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, func);
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, func);
+    return exceptions;
 }
 
 /*
@@ -962,8 +971,10 @@ vfp_single_multiply_accumulate(ARMul_State* state, int sd, int sn, s32 m, u32 fp
  */
 static u32 vfp_single_fmac(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
+    u32 exceptions = 0;
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, sd);
-    return vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, 0, "fmac");
+    exceptions |= vfp_single_multiply_accumulate(state, sd, sn, m, fpscr, 0, "fmac");
+    return exceptions;
 }
 
 /*
@@ -1000,21 +1011,23 @@ static u32 vfp_single_fnmsc(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
 static u32 vfp_single_fmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
     struct vfp_single vsd, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 n = vfp_get_float(state, sn);
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
 
-    vfp_single_unpack(&vsn, n, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fmul");
+    exceptions |= vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
+
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fmul");
+    return exceptions;
 }
 
 /*
@@ -1023,22 +1036,24 @@ static u32 vfp_single_fmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 static u32 vfp_single_fnmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
     struct vfp_single vsd, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 n = vfp_get_float(state, sn);
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
 
-    vfp_single_unpack(&vsn, n, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
+    exceptions |= vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
     vsd.sign = vfp_sign_negate(vsd.sign);
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fnmul");
+
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fnmul");
+    return exceptions;
 }
 
 /*
@@ -1047,7 +1062,7 @@ static u32 vfp_single_fnmul(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr
 static u32 vfp_single_fadd(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 {
     struct vfp_single vsd, vsn, vsm;
-    u32 exceptions;
+    u32 exceptions = 0;
     s32 n = vfp_get_float(state, sn);
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
@@ -1055,17 +1070,18 @@ static u32 vfp_single_fadd(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
     /*
      * Unpack and normalise denormals.
      */
-    vfp_single_unpack(&vsn, n, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
     if (vsn.exponent == 0 && vsn.significand)
         vfp_single_normalise_denormal(&vsn);
 
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
     if (vsm.exponent == 0 && vsm.significand)
         vfp_single_normalise_denormal(&vsm);
 
-    exceptions = vfp_single_add(&vsd, &vsn, &vsm, fpscr);
+    exceptions |= vfp_single_add(&vsd, &vsn, &vsm, fpscr);
 
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, exceptions, "fadd");
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fadd");
+    return exceptions;
 }
 
 /*
@@ -1095,8 +1111,8 @@ static u32 vfp_single_fdiv(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
 
     LOG_TRACE(Core_ARM11, "s%u = %08x", sn, n);
 
-    vfp_single_unpack(&vsn, n, &fpscr);
-    vfp_single_unpack(&vsm, m, &fpscr);
+    exceptions |= vfp_single_unpack(&vsn, n, fpscr);
+    exceptions |= vfp_single_unpack(&vsm, m, fpscr);
 
     vsd.sign = vsn.sign ^ vsm.sign;
 
@@ -1162,16 +1178,17 @@ static u32 vfp_single_fdiv(ARMul_State* state, int sd, int sn, s32 m, u32 fpscr)
     if ((vsd.significand & 0x3f) == 0)
         vsd.significand |= ((u64)vsm.significand * vsd.significand != (u64)vsn.significand << 32);
 
-    return vfp_single_normaliseround(state, sd, &vsd, fpscr, 0, "fdiv");
+    exceptions |= vfp_single_normaliseround(state, sd, &vsd, fpscr, "fdiv");
+    return exceptions;
 
 vsn_nan:
-    exceptions = vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
+    exceptions |= vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
 pack:
     vfp_put_float(state, vfp_single_pack(&vsd), sd);
     return exceptions;
 
 vsm_nan:
-    exceptions = vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr);
+    exceptions |= vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr);
     goto pack;
 
 zero:
@@ -1180,7 +1197,7 @@ zero:
     goto pack;
 
 divzero:
-    exceptions = FPSCR_DZC;
+    exceptions |= FPSCR_DZC;
 infinity:
     vsd.exponent = 255;
     vsd.significand = 0;
@@ -1188,7 +1205,8 @@ infinity:
 
 invalid:
     vfp_put_float(state, vfp_single_pack(&vfp_single_default_qnan), sd);
-    return FPSCR_IOC;
+    exceptions |= FPSCR_IOC;
+    return exceptions;
 }
 
 static struct op fops[] = {