diff --git a/src/core/arm/skyeye_common/vfp/vfp_helper.h b/src/core/arm/skyeye_common/vfp/vfp_helper.h
index 210972917d..c1cc4fd7e0 100644
--- a/src/core/arm/skyeye_common/vfp/vfp_helper.h
+++ b/src/core/arm/skyeye_common/vfp/vfp_helper.h
@@ -357,8 +357,10 @@ inline int vfp_double_type(const vfp_double* s)
-// Unpack a double-precision float.  Note that this returns the magnitude
-// of the double-precision float mantissa with the 1. if necessary,
-// aligned to bit 62.
+// Unpack a double-precision float into *s: the magnitude of the mantissa,
+// with the implicit leading 1 restored if necessary, aligned to bit 62.
+// Exception flags raised while unpacking (e.g. FPSCR_IDC when a denormal
+// is flushed to zero) are returned instead of being set in the FPSCR.
-inline void vfp_double_unpack(vfp_double* s, s64 val, u32* fpscr)
+inline u32 vfp_double_unpack(vfp_double* s, s64 val, u32 fpscr)
 {
+    u32 exceptions = 0;
     s->sign = vfp_double_packed_sign(val) >> 48;
     s->exponent = vfp_double_packed_exponent(val);
 
@@ -369,12 +370,13 @@ inline void vfp_double_unpack(vfp_double* s, s64 val, u32* fpscr)
 
     // If flush-to-zero mode is enabled, turn the denormal into zero.
     // On a VFPv2 architecture, the sign of the zero is always positive.
-    if ((*fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_double_type(s) & VFP_DENORMAL) != 0) {
+    if ((fpscr & FPSCR_FLUSH_TO_ZERO) != 0 && (vfp_double_type(s) & VFP_DENORMAL) != 0) {
         s->sign = 0;
         s->exponent = 0;
         s->significand = 0;
-        *fpscr |= FPSCR_IDC;
+        exceptions |= FPSCR_IDC;
     }
+    return exceptions;
 }
 
 // Re-pack a double-precision float. This assumes that the float is
diff --git a/src/core/arm/skyeye_common/vfp/vfpdouble.cpp b/src/core/arm/skyeye_common/vfp/vfpdouble.cpp
index 45914d4792..0182b90b81 100644
--- a/src/core/arm/skyeye_common/vfp/vfpdouble.cpp
+++ b/src/core/arm/skyeye_common/vfp/vfpdouble.cpp
@@ -291,8 +291,9 @@ static u32 vfp_double_fsqrt(ARMul_State* state, int dd, int unused, int dm, u32
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
     vfp_double vdm, vdd, *vdp;
     int ret, tm;
+    u32 exceptions = 0;
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
 
     tm = vfp_double_type(&vdm);
     if (tm & (VFP_NAN|VFP_INFINITY)) {
@@ -369,7 +370,9 @@ sqrt_invalid:
     }
     vdd.significand = vfp_shiftright64jamming(vdd.significand, 1);
 
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, 0, "fsqrt");
+    exceptions |= vfp_double_normaliseround(state, dd, &vdd, fpscr, 0, "fsqrt");
+
+    return exceptions;
 }
 
 /*
@@ -475,7 +478,7 @@ static u32 vfp_double_fcvts(ARMul_State* state, int sd, int unused, int dm, u32
     u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
 
     tm = vfp_double_type(&vdm);
 
@@ -545,7 +548,7 @@ static u32 vfp_double_ftoui(ARMul_State* state, int sd, int unused, int dm, u32
     int tm;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
 
     /*
      * Do we have a denormalised number?
@@ -626,7 +629,7 @@ static u32 vfp_double_ftosi(ARMul_State* state, int sd, int unused, int dm, u32
     int tm;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     vfp_double_dump("VDM", &vdm);
 
     /*
@@ -892,21 +895,21 @@ static u32
 vfp_double_multiply_accumulate(ARMul_State* state, int dd, int dn, int dm, u32 fpscr, u32 negate, const char *func)
 {
     struct vfp_double vdd, vdp, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
-    exceptions = vfp_double_multiply(&vdp, &vdn, &vdm, fpscr);
+    exceptions |= vfp_double_multiply(&vdp, &vdn, &vdm, fpscr);
     if (negate & NEG_MULTIPLY)
         vdp.sign = vfp_sign_negate(vdp.sign);
 
-    vfp_double_unpack(&vdn, vfp_get_double(state, dd), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dd), fpscr);
     if (vdn.exponent == 0 && vdn.significand != 0)
         vfp_double_normalise_denormal(&vdn);
 
@@ -964,18 +967,18 @@ static u32 vfp_double_fnmsc(ARMul_State* state, int dd, int dn, int dm, u32 fpsc
 static u32 vfp_double_fmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
 {
     struct vfp_double vdd, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
-    exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
+    exceptions |= vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
     return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fmul");
 }
 
@@ -985,18 +988,18 @@ static u32 vfp_double_fmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
 static u32 vfp_double_fnmul(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
 {
     struct vfp_double vdd, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
-    exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
+    exceptions |= vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
     vdd.sign = vfp_sign_negate(vdd.sign);
 
     return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fnmul");
@@ -1008,18 +1011,18 @@ static u32 vfp_double_fnmul(ARMul_State* state, int dd, int dn, int dm, u32 fpsc
 static u32 vfp_double_fadd(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
 {
     struct vfp_double vdd, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
-    exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
+    exceptions |= vfp_double_add(&vdd, &vdn, &vdm, fpscr);
 
     return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fadd");
 }
@@ -1030,14 +1033,14 @@ static u32 vfp_double_fadd(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
 static u32 vfp_double_fsub(ARMul_State* state, int dd, int dn, int dm, u32 fpscr)
 {
     struct vfp_double vdd, vdn, vdm;
-    u32 exceptions;
+    u32 exceptions = 0;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
     if (vdn.exponent == 0 && vdn.significand)
         vfp_double_normalise_denormal(&vdn);
 
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
     if (vdm.exponent == 0 && vdm.significand)
         vfp_double_normalise_denormal(&vdm);
 
@@ -1046,7 +1049,7 @@ static u32 vfp_double_fsub(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
      */
     vdm.sign = vfp_sign_negate(vdm.sign);
 
-    exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
+    exceptions |= vfp_double_add(&vdd, &vdn, &vdm, fpscr);
 
     return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fsub");
 }
@@ -1061,8 +1064,8 @@ static u32 vfp_double_fdiv(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
     int tm, tn;
 
     LOG_TRACE(Core_ARM11, "In %s", __FUNCTION__);
-    vfp_double_unpack(&vdn, vfp_get_double(state, dn), &fpscr);
-    vfp_double_unpack(&vdm, vfp_get_double(state, dm), &fpscr);
+    exceptions |= vfp_double_unpack(&vdn, vfp_get_double(state, dn), fpscr);
+    exceptions |= vfp_double_unpack(&vdm, vfp_get_double(state, dm), fpscr);
 
     vdd.sign = vdn.sign ^ vdm.sign;
 
@@ -1134,13 +1137,13 @@ static u32 vfp_double_fdiv(ARMul_State* state, int dd, int dn, int dm, u32 fpscr
-    return vfp_double_normaliseround(state, dd, &vdd, fpscr, 0, "fdiv");
+    return vfp_double_normaliseround(state, dd, &vdd, fpscr, exceptions, "fdiv");
 
 vdn_nan:
-    exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
+    exceptions |= vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
 pack:
     vfp_put_double(state, vfp_double_pack(&vdd), dd);
     return exceptions;
 
 vdm_nan:
-    exceptions = vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr);
+    exceptions |= vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr);
     goto pack;
 
 zero:
@@ -1149,7 +1152,7 @@ zero:
     goto pack;
 
 divzero:
-    exceptions = FPSCR_DZC;
+    exceptions |= FPSCR_DZC;
 infinity:
     vdd.exponent = 2047;
     vdd.significand = 0;
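
Note on the calling convention this patch introduces: helpers no longer write cumulative exception bits into the live FPSCR through a pointer. Each helper returns the bits it raised, the caller ORs them into a local `exceptions` accumulator, and the accumulated mask is applied to the FPSCR once at the end of the instruction. The sketch below is a minimal standalone illustration of that pattern, not Citra code: `unpack_step`, `divide_step`, and the `main` driver are invented for this example, and the `FPSCR_*` constants are written out with the standard ARM FPSCR bit positions rather than taken from vfp_helper.h.

```cpp
#include <cstdint>
#include <cstdio>

using u32 = std::uint32_t;

// Illustrative constants (standard ARM FPSCR bit positions).
constexpr u32 FPSCR_IDC = 1u << 7;            // input denormal cumulative flag
constexpr u32 FPSCR_DZC = 1u << 1;            // division-by-zero cumulative flag
constexpr u32 FPSCR_FLUSH_TO_ZERO = 1u << 24; // FZ control bit

// Each step takes the FPSCR by value (read-only) and *returns* the
// exception flags it raised, mirroring the new vfp_double_unpack contract.
u32 unpack_step(u32 fpscr) {
    u32 exceptions = 0;
    if ((fpscr & FPSCR_FLUSH_TO_ZERO) != 0)
        exceptions |= FPSCR_IDC; // pretend a denormal operand was flushed
    return exceptions;
}

u32 divide_step(bool divisor_is_zero) {
    return divisor_is_zero ? FPSCR_DZC : 0u;
}

int main() {
    u32 fpscr = FPSCR_FLUSH_TO_ZERO; // flush-to-zero enabled
    u32 exceptions = 0;

    // Accumulate with |= so no step can clobber flags raised earlier.
    exceptions |= unpack_step(fpscr);
    exceptions |= divide_step(true);

    // Apply the accumulated flags to the FPSCR exactly once, at the end.
    fpscr |= exceptions;
    std::printf("exceptions=0x%08x fpscr=0x%08x\n",
                static_cast<unsigned>(exceptions), static_cast<unsigned>(fpscr));
    return 0;
}
```

Accumulating with `|=` everywhere is the point: a plain `=` silently drops flags raised by earlier steps, which is exactly the bug class the `exceptions = vfp_double_multiply(...)` to `exceptions |= ...` hunks above close.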