OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1937 matching lines...)
1948 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 1948 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
1949 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 1949 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
1950 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 1950 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
1951 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); | 1951 cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); |
1952 b(hi, fail); | 1952 b(hi, fail); |
1953 } | 1953 } |
1954 | 1954 |
1955 | 1955 |
1956 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, | 1956 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, |
1957 Register key_reg, | 1957 Register key_reg, |
1958 Register receiver_reg, | |
1959 Register elements_reg, | 1958 Register elements_reg, |
1960 Register scratch1, | 1959 Register scratch1, |
1961 Register scratch2, | 1960 Register scratch2, |
1962 Register scratch3, | 1961 Register scratch3, |
1963 Register scratch4, | 1962 Register scratch4, |
1964 Label* fail) { | 1963 Label* fail, |
| 1964 int elements_offset) { |
1965 Label smi_value, maybe_nan, have_double_value, is_nan, done; | 1965 Label smi_value, maybe_nan, have_double_value, is_nan, done; |
1966 Register mantissa_reg = scratch2; | 1966 Register mantissa_reg = scratch2; |
1967 Register exponent_reg = scratch3; | 1967 Register exponent_reg = scratch3; |
1968 | 1968 |
1969 // Handle smi values specially. | 1969 // Handle smi values specially. |
1970 JumpIfSmi(value_reg, &smi_value); | 1970 JumpIfSmi(value_reg, &smi_value); |
1971 | 1971 |
1972 // Ensure that the object is a heap number | 1972 // Ensure that the object is a heap number |
1973 CheckMap(value_reg, | 1973 CheckMap(value_reg, |
1974 scratch1, | 1974 scratch1, |
1975 isolate()->factory()->heap_number_map(), | 1975 isolate()->factory()->heap_number_map(), |
1976 fail, | 1976 fail, |
1977 DONT_DO_SMI_CHECK); | 1977 DONT_DO_SMI_CHECK); |
1978 | 1978 |
1979 // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 | 1979 // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 |
1980 // in the exponent. | 1980 // in the exponent. |
1981 mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32)); | 1981 mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32)); |
1982 ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); | 1982 ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); |
1983 cmp(exponent_reg, scratch1); | 1983 cmp(exponent_reg, scratch1); |
1984 b(ge, &maybe_nan); | 1984 b(ge, &maybe_nan); |
1985 | 1985 |
1986 ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); | 1986 ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); |
1987 | 1987 |
1988 bind(&have_double_value); | 1988 bind(&have_double_value); |
1989 add(scratch1, elements_reg, | 1989 add(scratch1, elements_reg, |
1990 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); | 1990 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); |
1991 str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); | 1991 str(mantissa_reg, FieldMemOperand( |
1992 uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); | 1992 scratch1, FixedDoubleArray::kHeaderSize - elements_offset)); |
| 1993 uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset + |
| 1994 sizeof(kHoleNanLower32); |
1993 str(exponent_reg, FieldMemOperand(scratch1, offset)); | 1995 str(exponent_reg, FieldMemOperand(scratch1, offset)); |
1994 jmp(&done); | 1996 jmp(&done); |
1995 | 1997 |
1996 bind(&maybe_nan); | 1998 bind(&maybe_nan); |
1997 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise | 1999 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise |
1998 // it's an Infinity, and the non-NaN code path applies. | 2000 // it's an Infinity, and the non-NaN code path applies. |
1999 b(gt, &is_nan); | 2001 b(gt, &is_nan); |
2000 ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); | 2002 ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); |
2001 cmp(mantissa_reg, Operand(0)); | 2003 cmp(mantissa_reg, Operand(0)); |
2002 b(eq, &have_double_value); | 2004 b(eq, &have_double_value); |
2003 bind(&is_nan); | 2005 bind(&is_nan); |
2004 // Load canonical NaN for storing into the double array. | 2006 // Load canonical NaN for storing into the double array. |
2005 uint64_t nan_int64 = BitCast<uint64_t>( | 2007 uint64_t nan_int64 = BitCast<uint64_t>( |
2006 FixedDoubleArray::canonical_not_the_hole_nan_as_double()); | 2008 FixedDoubleArray::canonical_not_the_hole_nan_as_double()); |
2007 mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64))); | 2009 mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64))); |
2008 mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32))); | 2010 mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32))); |
2009 jmp(&have_double_value); | 2011 jmp(&have_double_value); |
2010 | 2012 |
2011 bind(&smi_value); | 2013 bind(&smi_value); |
2012 add(scratch1, elements_reg, | 2014 add(scratch1, elements_reg, |
2013 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); | 2015 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - |
| 2016 elements_offset)); |
2014 add(scratch1, scratch1, | 2017 add(scratch1, scratch1, |
2015 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); | 2018 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); |
2016 // scratch1 is now effective address of the double element | 2019 // scratch1 is now effective address of the double element |
2017 | 2020 |
2018 FloatingPointHelper::Destination destination; | 2021 FloatingPointHelper::Destination destination; |
2019 if (CpuFeatures::IsSupported(VFP2)) { | 2022 if (CpuFeatures::IsSupported(VFP2)) { |
2020 destination = FloatingPointHelper::kVFPRegisters; | 2023 destination = FloatingPointHelper::kVFPRegisters; |
2021 } else { | 2024 } else { |
2022 destination = FloatingPointHelper::kCoreRegisters; | 2025 destination = FloatingPointHelper::kCoreRegisters; |
2023 } | 2026 } |
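Note on the hunk above: the macro's non-smi path detects NaN by inspecting the upper 32 bits of the double and, if it finds one, substitutes the canonical non-hole NaN before writing the two 32-bit words at FixedDoubleArray::kHeaderSize - elements_offset; the new elements_offset parameter simply biases that header offset so a caller can pass an elements pointer that already includes a constant adjustment. As a stand-alone C++ sketch of the canonicalization being performed (not V8 code; the constants below are illustrative assumptions, and the check is the generic bit-pattern test rather than a line-for-line transcription of the ARM sequence):

#include <cstdint>
#include <cstring>

// Illustrative constants; the real ones are kNaNOrInfinityLowerBoundUpper32
// and FixedDoubleArray's canonical non-hole NaN in the V8 sources.
static const uint32_t kExponentAllOnesUpper32 = 0x7FF00000u;
static const uint64_t kCanonicalNaNBits       = 0x7FF8000000000000ull;

// Store 'value' into slot 'index' of a raw double buffer, replacing any NaN
// payload with the canonical NaN so the hole NaN bit pattern stays unique.
void StoreCanonicalizedDouble(double* elements, int index, double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t upper = static_cast<uint32_t>(bits >> 32);
  uint32_t lower = static_cast<uint32_t>(bits);
  // All exponent bits set means NaN or Infinity; it is a NaN only if some
  // mantissa bit is also set.
  bool exponent_all_ones = (upper & 0x7FF00000u) == kExponentAllOnesUpper32;
  bool is_nan = exponent_all_ones && (((upper & 0x000FFFFFu) | lower) != 0);
  if (is_nan) bits = kCanonicalNaNBits;
  std::memcpy(&elements[index], &bits, sizeof(bits));
}
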
(...skipping 1857 matching lines...)
3881 void CodePatcher::EmitCondition(Condition cond) { | 3884 void CodePatcher::EmitCondition(Condition cond) { |
3882 Instr instr = Assembler::instr_at(masm_.pc_); | 3885 Instr instr = Assembler::instr_at(masm_.pc_); |
3883 instr = (instr & ~kCondMask) | cond; | 3886 instr = (instr & ~kCondMask) | cond; |
3884 masm_.emit(instr); | 3887 masm_.emit(instr); |
3885 } | 3888 } |
3886 | 3889 |
3887 | 3890 |
3888 } } // namespace v8::internal | 3891 } } // namespace v8::internal |
3889 | 3892 |
3890 #endif // V8_TARGET_ARCH_ARM | 3893 #endif // V8_TARGET_ARCH_ARM |
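For the second hunk: CodePatcher::EmitCondition rewrites only the condition field of an already-emitted instruction word. A rough illustration of that bit manipulation (assuming the usual A32 layout where bits 31..28 hold the condition; kCondMask is written out here as a literal mirroring V8's constant):

#include <cstdint>

typedef uint32_t Instr;
static const Instr kCondMask = 0xF0000000u;  // bits 31..28: condition code

// Replace the condition field of an existing instruction word, which is
// what CodePatcher::EmitCondition does before re-emitting it.
Instr PatchCondition(Instr instr, Instr cond) {
  return (instr & ~kCondMask) | (cond & kCondMask);
}

// Example: turn an unconditional (AL = 0xE) "mov r0, #1" into its EQ form.
// Instr patched = PatchCondition(0xE3A00001u, 0x00000000u);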