@@ -224,6 +224,40 @@ void CheckedIntPtrToInt32::GenerateCode(MaglevAssembler* masm,
224224 Operand (std::numeric_limits<int32_t >::min ()));
225225}
226226
227+ void CheckFloat64SameValue::SetValueLocationConstraints () {
228+ UseRegister (target_input ());
229+ // We need two because LoadFPRImmediate needs to acquire one as well in the
230+ // case where value() is not 0.0 or -0.0.
231+ set_temporaries_needed ((value ().get_scalar () == 0 ) ? 1 : 2 );
232+ set_double_temporaries_needed (
233+ value ().is_nan () || (value ().get_scalar () == 0 ) ? 0 : 1 );
234+ }
235+
// Emits code that deoptimizes unless the input double is SameValue-equal to
// the node's constant: NaN matches any NaN, and +0.0 / -0.0 are
// distinguished by sign bit (unlike ==).
void CheckFloat64SameValue::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  Label* fail = __ GetDeoptLabel(this, deoptimize_reason());
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  DoubleRegister target = ToDoubleRegister(target_input());
  if (value().is_nan()) {
    // SameValue treats every NaN payload as equal, so a NaN test suffices.
    __ JumpIfNotNan(target, fail);
  } else {
    DoubleRegister double_scratch = temps.AcquireScratchDouble();
    Register scratch = temps.AcquireScratch();
    // Materialize the expected constant into an FP register.
    __ Move(double_scratch, value().get_scalar());
    // scratch = (double_scratch == target) ? 1 : 0 (RISC-V FP compares
    // produce a GP boolean; a NaN input compares unequal and deopts here).
    __ CompareF64(scratch, EQ, double_scratch, target);
    __ BranchFalseF(scratch, fail);  // Deopt when not numerically equal.
    if (value().get_scalar() == 0) {  // +0.0 or -0.0.
      // 0.0 == -0.0 compares equal, so disambiguate via the raw sign bit.
      __ MacroAssembler::Move(scratch, target);  // FP bit pattern -> GP reg.
      __ And(scratch, scratch, Operand(1ULL << 63));  // Isolate sign bit.
      // NOTE(review): BranchTrueF/BranchFalseF are used here as plain
      // nonzero/zero branches on a GP register — confirm against the
      // RISC-V MacroAssembler definitions.
      if (value().get_bits() == 0) {
        // Expected +0.0: deopt if the input's sign bit is set (it is -0.0).
        __ BranchTrueF(scratch, fail);
      } else {
        // Expected -0.0: deopt if the sign bit is clear (it is +0.0).
        __ BranchFalseF(scratch, fail);
      }
    }
  }
}
260+
227261void Int32AddWithOverflow::SetValueLocationConstraints () {
228262 UseRegister (left_input ());
229263 UseRegister (right_input ());
0 commit comments